| code (string, lengths 81 to 54k) | code_codestyle (int64, 0 to 721) | style_context (string, lengths 91 to 41.9k) | style_context_codestyle (int64, 0 to 699) | label (int64, 0 or 1) |
|---|---|---|---|---|
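Each row pairs a `code` string with a `style_context` string, together with integer code-style identifiers and a binary `label`. As a rough sketch only, assuming the table above is a Hugging Face dataset with exactly these five columns (the dataset path below is a hypothetical placeholder, since the actual name is not given here), the rows could be inspected like this:

```python
from datasets import load_dataset

# "user/code-style-pairs" is a hypothetical placeholder path, not the real dataset name.
ds = load_dataset("user/code-style-pairs", split="train")

for row in ds.select(range(3)):
    # each row exposes the five columns described in the header row above
    print(len(row["code"]), row["code_codestyle"], row["style_context_codestyle"], row["label"])
```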
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"bert_for_seq_generation": (
"https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for BertGeneration (bert_for_seq_generation)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : Optional[Any] , lowercase : Union[str, Any] , lowercase : Dict="<s>" , lowercase : Optional[Any]="</s>" , lowercase : str="<unk>" , lowercase : Dict="<pad>" , lowercase : Tuple="<::::>" , lowercase : Optional[Dict[str, Any]] = None , **lowercase : List[str] , ) -> None:
"""simple docstring"""
__lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , pad_token=lowercase , sep_token=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , )
__lowercase = vocab_file
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase )
@property
def snake_case__ ( self : Any ) -> str:
"""simple docstring"""
return self.sp_model.get_piece_size()
def snake_case__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.__dict__.copy()
__lowercase = None
return state
def __setstate__( self : str , lowercase : Optional[int] ) -> int:
"""simple docstring"""
__lowercase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__lowercase = {}
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case__ ( self : Optional[int] , lowercase : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowercase , out_type=lowercase )
def snake_case__ ( self : Tuple , lowercase : Tuple ) -> Dict:
"""simple docstring"""
return self.sp_model.piece_to_id(lowercase )
def snake_case__ ( self : int , lowercase : int ) -> str:
"""simple docstring"""
__lowercase = self.sp_model.IdToPiece(lowercase )
return token
def snake_case__ ( self : List[Any] , lowercase : List[str] ) -> Dict:
"""simple docstring"""
__lowercase = []
__lowercase = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowercase ) + token
__lowercase = []
else:
current_sub_tokens.append(lowercase )
out_string += self.sp_model.decode(lowercase )
return out_string.strip()
def snake_case__ ( self : Optional[int] , lowercase : str , lowercase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowercase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__lowercase = os.path.join(
lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase , """wb""" ) as fi:
__lowercase = self.sp_model.serialized_model_proto()
fi.write(lowercase )
return (out_vocab_file,)
| 700 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
UpperCamelCase__ = {
"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    """Configuration class for ESM models."""

    model_type = "esm"
def __init__( self : Any , lowercase : Optional[Any]=None , lowercase : Optional[int]=None , lowercase : List[Any]=None , lowercase : Optional[int]=768 , lowercase : str=12 , lowercase : Union[str, Any]=12 , lowercase : Dict=3_072 , lowercase : Optional[int]=0.1 , lowercase : str=0.1 , lowercase : Dict=1_026 , lowercase : Tuple=0.02 , lowercase : str=1E-1_2 , lowercase : Dict="absolute" , lowercase : Optional[Any]=True , lowercase : int=None , lowercase : int=False , lowercase : List[str]=False , lowercase : Tuple=None , lowercase : Tuple=None , **lowercase : Union[str, Any] , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=lowercase , mask_token_id=lowercase , **lowercase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
__lowercase = use_cache
__lowercase = emb_layer_norm_before
__lowercase = token_dropout
__lowercase = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
__lowercase = EsmFoldConfig()
elif isinstance(lowercase , lowercase ):
__lowercase = EsmFoldConfig(**lowercase )
__lowercase = esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
__lowercase = get_default_vocab_list()
else:
__lowercase = vocab_list
else:
__lowercase = None
__lowercase = None
if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , lowercase ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def snake_case__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = super().to_dict()
if isinstance(self.esmfold_config , lowercase ):
__lowercase = self.esmfold_config.to_dict()
return output
@dataclass
class EsmFoldConfig:
"""simple docstring"""
lowercase__ : str = None
lowercase__ : bool = True
lowercase__ : bool = False
lowercase__ : bool = False
lowercase__ : bool = False
lowercase__ : float = 0
lowercase__ : bool = True
lowercase__ : bool = False
lowercase__ : int = 128
lowercase__ : "TrunkConfig" = None
def snake_case__ ( self : List[str] ) -> Any:
"""simple docstring"""
if self.trunk is None:
__lowercase = TrunkConfig()
elif isinstance(self.trunk , lowercase ):
__lowercase = TrunkConfig(**self.trunk )
def snake_case__ ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase = asdict(self )
__lowercase = self.trunk.to_dict()
return output
@dataclass
class TrunkConfig:
"""simple docstring"""
lowercase__ : int = 48
lowercase__ : int = 1_024
lowercase__ : int = 128
lowercase__ : int = 32
lowercase__ : int = 32
lowercase__ : int = 32
lowercase__ : float = 0
lowercase__ : float = 0
lowercase__ : bool = False
lowercase__ : int = 4
lowercase__ : Optional[int] = 128
lowercase__ : "StructureModuleConfig" = None
def snake_case__ ( self : Tuple ) -> str:
"""simple docstring"""
if self.structure_module is None:
__lowercase = StructureModuleConfig()
elif isinstance(self.structure_module , lowercase ):
__lowercase = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"`max_recycles` should be positive, got {self.max_recycles}." )
if self.sequence_state_dim % self.sequence_state_dim != 0:
raise ValueError(
"""`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"""
F" {self.sequence_state_dim} and {self.sequence_state_dim}." )
if self.pairwise_state_dim % self.pairwise_state_dim != 0:
raise ValueError(
"""`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"""
F" {self.pairwise_state_dim} and {self.pairwise_state_dim}." )
__lowercase = self.sequence_state_dim // self.sequence_head_width
__lowercase = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
F" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
F" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
if self.dropout >= 0.4:
raise ValueError(F"`dropout` should not be greater than 0.4, got {self.dropout}." )
def snake_case__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = asdict(self )
__lowercase = self.structure_module.to_dict()
return output
@dataclass
class StructureModuleConfig:
"""simple docstring"""
lowercase__ : int = 384
lowercase__ : int = 128
lowercase__ : int = 16
lowercase__ : int = 128
lowercase__ : int = 12
lowercase__ : int = 4
lowercase__ : int = 8
lowercase__ : float = 0.1
lowercase__ : int = 8
lowercase__ : int = 1
lowercase__ : int = 2
lowercase__ : int = 7
lowercase__ : int = 10
lowercase__ : float = 1E-8
lowercase__ : float = 1E5
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
return asdict(self )
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 634 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase )
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : str = field(default="""text-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
lowercase__ : ClassVar[Features] = Features({"""text""": Value("""string""" )} )
lowercase__ : ClassVar[Features] = Features({"""labels""": ClassLabel} )
lowercase__ : str = "text"
lowercase__ : str = "labels"
def snake_case__ ( self : Tuple , lowercase : List[str] ) -> Optional[Any]:
"""simple docstring"""
if self.label_column not in features:
raise ValueError(F"Column {self.label_column} is not present in features." )
if not isinstance(features[self.label_column] , lowercase ):
raise ValueError(F"Column {self.label_column} is not a ClassLabel." )
__lowercase = copy.deepcopy(self )
__lowercase = self.label_schema.copy()
__lowercase = features[self.label_column]
__lowercase = label_schema
return task_template
@property
def snake_case__ ( self : Union[str, Any] ) -> Dict[str, str]:
"""simple docstring"""
return {
self.text_column: "text",
self.label_column: "labels",
}
| 701 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : Tuple = LxmertTokenizer
lowercase__ : List[str] = LxmertTokenizerFast
lowercase__ : Optional[Any] = True
lowercase__ : List[Any] = True
def snake_case__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
super().setUp()
__lowercase = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def snake_case__ ( self : Optional[int] , lowercase : int ) -> List[Any]:
"""simple docstring"""
__lowercase = """UNwant\u00E9d,running"""
__lowercase = """unwanted, running"""
return input_text, output_text
def snake_case__ ( self : str ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class(self.vocab_file )
__lowercase = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(lowercase , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , [7, 4, 5, 10, 8, 9] )
def snake_case__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__lowercase = self.get_tokenizer()
__lowercase = self.get_rust_tokenizer()
__lowercase = """I was born in 92000, and this is falsé."""
__lowercase = tokenizer.tokenize(lowercase )
__lowercase = rust_tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
__lowercase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
__lowercase = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
__lowercase = self.get_rust_tokenizer()
__lowercase = tokenizer.encode(lowercase )
__lowercase = rust_tokenizer.encode(lowercase )
self.assertListEqual(lowercase , lowercase )
| 634 | 0 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : int , lowercase : int = 16 , lowercase : int = 88 , lowercase : Optional[int] = None , lowercase : int = 1 , lowercase : float = 0.0 , lowercase : int = 32 , lowercase : Optional[int] = None , lowercase : bool = False , lowercase : Optional[int] = None , lowercase : Optional[int] = None , lowercase : str = "geglu" , lowercase : Optional[int] = None , ) -> Tuple:
"""simple docstring"""
super().__init__()
__lowercase = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=lowercase , attention_head_dim=lowercase , in_channels=lowercase , num_layers=lowercase , dropout=lowercase , norm_num_groups=lowercase , cross_attention_dim=lowercase , attention_bias=lowercase , sample_size=lowercase , num_vector_embeds=lowercase , activation_fn=lowercase , num_embeds_ada_norm=lowercase , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
__lowercase = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
__lowercase = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
__lowercase = [1, 0]
def snake_case__ ( self : Union[str, Any] , lowercase : int , lowercase : Tuple , lowercase : int=None , lowercase : int=None , lowercase : List[str]=None , lowercase : bool = True , ) -> Optional[Any]:
"""simple docstring"""
__lowercase = hidden_states
__lowercase = []
__lowercase = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
__lowercase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
__lowercase = self.transformer_index_for_condition[i]
__lowercase = self.transformers[transformer_index](
lowercase , encoder_hidden_states=lowercase , timestep=lowercase , cross_attention_kwargs=lowercase , return_dict=lowercase , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
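# Blend the two transformers' residual outputs: mix_ratio (0.5 above, overridable by a pipeline)
# weights the first encoded state and (1 - mix_ratio) the second; the input states are added back below.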
__lowercase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
__lowercase = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=lowercase )
| 702 |
def is_sum_subset(arr, required_sum) -> bool:
    """
    Dynamic-programming subset-sum check.

    >>> is_sum_subset([2, 4, 6, 8], 5)
    False
    >>> is_sum_subset([2, 4, 6, 8], 14)
    True
    """
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 634 | 0 |
import argparse
import hashlib  # hashlib is only used for the self-test below
import struct


class SHA1Hash:
    """Pure-Python implementation of the SHA-1 hashing algorithm."""

    def __init__(self, data: bytes) -> None:
        self.data = data
        # SHA-1 initial hash values (h0..h4)
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n: int, b: int) -> int:
        """Left-rotate the 32-bit integer n by b bits."""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self) -> bytes:
        """Pad the message to a multiple of 64 bytes and append the bit length."""
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self) -> list:
        """Split the padded message into 64-byte blocks."""
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block: bytes) -> list:
        """Expand a 64-byte block into eighty 32-bit words."""
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self) -> str:
        """Run the 80-round compression over every block and return the hex digest."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash() -> None:
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main() -> None:
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
| 703 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Union[str, Any] = """yolos"""
def __init__( self : Optional[int] , lowercase : Any=768 , lowercase : Tuple=12 , lowercase : Tuple=12 , lowercase : str=3_072 , lowercase : Optional[Any]="gelu" , lowercase : Union[str, Any]=0.0 , lowercase : Dict=0.0 , lowercase : Optional[int]=0.02 , lowercase : Optional[Any]=1E-1_2 , lowercase : Tuple=[512, 864] , lowercase : Optional[int]=16 , lowercase : Dict=3 , lowercase : Optional[Any]=True , lowercase : Optional[int]=100 , lowercase : Optional[int]=True , lowercase : Any=False , lowercase : Any=1 , lowercase : Any=5 , lowercase : List[str]=2 , lowercase : Union[str, Any]=5 , lowercase : str=2 , lowercase : Tuple=0.1 , **lowercase : str , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowercase )
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = qkv_bias
__lowercase = num_detection_tokens
__lowercase = use_mid_position_embeddings
__lowercase = auxiliary_loss
# Hungarian matcher
__lowercase = class_cost
__lowercase = bbox_cost
__lowercase = giou_cost
# Loss coefficients
__lowercase = bbox_loss_coefficient
__lowercase = giou_loss_coefficient
__lowercase = eos_coefficient
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Dict = version.parse("""1.11""" )
@property
def snake_case__ ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def snake_case__ ( self : int ) -> float:
"""simple docstring"""
return 1E-4
@property
def snake_case__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
return 12
| 634 | 0 |
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
"""simple docstring"""
def snake_case__ ( self : Union[str, Any] , lowercase : int ) -> str:
"""simple docstring"""
if isinstance(lowercase , lowercase ):
__lowercase = [label.strip() for label in labels.split(""",""" ) if label.strip()]
return labels
def __call__( self : List[Any] , lowercase : str , lowercase : Any , lowercase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
if len(lowercase ) == 0 or len(lowercase ) == 0:
raise ValueError("""You must include at least one label and at least one sequence.""" )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"""The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
"""Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
).format(lowercase ) )
if isinstance(lowercase , lowercase ):
__lowercase = [sequences]
__lowercase = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(lowercase )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
"""simple docstring"""
def __init__( self : List[Any] , lowercase : Tuple=ZeroShotClassificationArgumentHandler() , *lowercase : List[str] , **lowercase : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase = args_parser
super().__init__(*lowercase , **lowercase )
if self.entailment_id == -1:
logger.warning(
"""Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
@property
def snake_case__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("""entail""" ):
return ind
return -1
def snake_case__ ( self : int , lowercase : Optional[Any] , lowercase : Any=True , lowercase : Tuple=True , lowercase : Tuple=TruncationStrategy.ONLY_FIRST , **lowercase : str ) -> str:
"""simple docstring"""
__lowercase = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"""Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
""" `pad_token=eos_token`""" )
__lowercase = self.tokenizer.eos_token
try:
__lowercase = self.tokenizer(
lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=lowercase , )
except Exception as e:
if "too short" in str(lowercase ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
__lowercase = self.tokenizer(
lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def snake_case__ ( self : Tuple , **lowercase : Dict ) -> Dict:
"""simple docstring"""
if kwargs.get("""multi_class""" , lowercase ) is not None:
__lowercase = kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""" )
__lowercase = {}
if "candidate_labels" in kwargs:
__lowercase = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
if "hypothesis_template" in kwargs:
__lowercase = kwargs["""hypothesis_template"""]
__lowercase = {}
if "multi_label" in kwargs:
__lowercase = kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__( self : Dict , lowercase : Union[str, List[str]] , *lowercase : List[Any] , **lowercase : Dict , ) -> Union[str, Any]:
"""simple docstring"""
if len(lowercase ) == 0:
pass
elif len(lowercase ) == 1 and "candidate_labels" not in kwargs:
__lowercase = args[0]
else:
raise ValueError(F"Unable to understand extra arguments {args}" )
return super().__call__(lowercase , **lowercase )
def snake_case__ ( self : List[str] , lowercase : int , lowercase : Tuple=None , lowercase : Tuple="This example is {}." ) -> Union[str, Any]:
"""simple docstring"""
__lowercase , __lowercase = self._args_parser(lowercase , lowercase , lowercase )
for i, (candidate_label, sequence_pair) in enumerate(zip(lowercase , lowercase ) ):
__lowercase = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(lowercase ) - 1,
**model_input,
}
def snake_case__ ( self : Tuple , lowercase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = inputs["""candidate_label"""]
__lowercase = inputs["""sequence"""]
__lowercase = {k: inputs[k] for k in self.tokenizer.model_input_names}
__lowercase = self.model(**lowercase )
__lowercase = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def snake_case__ ( self : List[str] , lowercase : Union[str, Any] , lowercase : List[str]=False ) -> Tuple:
"""simple docstring"""
__lowercase = [outputs["""candidate_label"""] for outputs in model_outputs]
__lowercase = [outputs["""sequence"""] for outputs in model_outputs]
__lowercase = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
__lowercase = logits.shape[0]
__lowercase = len(lowercase )
__lowercase = N // n
__lowercase = logits.reshape((num_sequences, n, -1) )
if multi_label or len(lowercase ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
__lowercase = self.entailment_id
__lowercase = -1 if entailment_id == 0 else 0
__lowercase = reshaped_outputs[..., [contradiction_id, entailment_id]]
__lowercase = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase )
__lowercase = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
__lowercase = reshaped_outputs[..., self.entailment_id]
__lowercase = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase )
__lowercase = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
| 704 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowerCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : int = IFImgaImgSuperResolutionPipeline
lowercase__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""}
lowercase__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""} )
lowercase__ : Tuple = PipelineTesterMixin.required_optional_params - {"""latents"""}
def snake_case__ ( self : Tuple ) -> Any:
"""simple docstring"""
return self._get_superresolution_dummy_components()
def snake_case__ ( self : List[str] , lowercase : Optional[int] , lowercase : Optional[Any]=0 ) -> Union[str, Any]:
"""simple docstring"""
if str(lowercase ).startswith("""mps""" ):
__lowercase = torch.manual_seed(lowercase )
else:
__lowercase = torch.Generator(device=lowercase ).manual_seed(lowercase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase ) ).to(lowercase )
__lowercase = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowercase ) ).to(lowercase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def snake_case__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def snake_case__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def snake_case__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def snake_case__ ( self : Dict ) -> int:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def snake_case__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
self._test_save_load_local()
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 634 | 0 |
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class DownloadCommand(BaseTransformersCLICommand):
"""simple docstring"""
@staticmethod
def snake_case__ ( lowercase : ArgumentParser ) -> str:
"""simple docstring"""
__lowercase = parser.add_parser("""download""" )
download_parser.add_argument(
"""--cache-dir""" , type=lowercase , default=lowercase , help="""Path to location to store the models""" )
download_parser.add_argument(
"""--force""" , action="""store_true""" , help="""Force the model to be download even if already in cache-dir""" )
download_parser.add_argument(
"""--trust-remote-code""" , action="""store_true""" , help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" , )
download_parser.add_argument("""model""" , type=lowercase , help="""Name of the model to download""" )
download_parser.set_defaults(func=lowercase )
def __init__( self : int , lowercase : str , lowercase : str , lowercase : bool , lowercase : bool ) -> Any:
"""simple docstring"""
__lowercase = model
__lowercase = cache
__lowercase = force
__lowercase = trust_remote_code
def snake_case__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
| 705 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_luke"] = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 634 | 0 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "
HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"
TO_HIGHLIGHT = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R"tfds\.core", R"datasets"),
(R"tf\.io\.gfile\.GFile", R"open"),
(R"tf\.([\w\d]+)", R"datasets.Value('\1')"),
(R"tfds\.features\.Text\(\)", R"datasets.Value('string')"),
(R"tfds\.features\.Text\(", R"datasets.Value('string'),"),
(R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("),
(R"tfds\.features\.FeaturesDict\(", R"dict("),
(R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(R"tfds\.", R"datasets."),
(R"dl_manager\.manual_dir", R"self.config.data_dir"),
(R"self\.builder_config", R"self.config"),
]
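# Example: applied in order with re.sub, the (pattern, replacement) pairs above rewrite a line such as
#     text = tfds.features.Text()
# into
#     text = datasets.Value('string')
# which is what the per-line conversion loop further below does for every source file.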
def convert_command_factory(args):
return ConvertCommand(args.tfds_path , args.datasets_directory )
class ConvertCommand(BaseDatasetsCLICommand):
"""simple docstring"""
@staticmethod
def snake_case__ ( lowercase : ArgumentParser ) -> int:
"""simple docstring"""
__lowercase = parser.add_parser(
"""convert""" , help="""Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.""" , )
train_parser.add_argument(
"""--tfds_path""" , type=lowercase , required=lowercase , help="""Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.""" , )
train_parser.add_argument(
"""--datasets_directory""" , type=lowercase , required=lowercase , help="""Path to the HuggingFace Datasets folder.""" )
train_parser.set_defaults(func=lowercase )
def __init__( self : List[str] , lowercase : str , lowercase : str , *lowercase : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = get_logger("""datasets-cli/converting""" )
__lowercase = tfds_path
__lowercase = datasets_directory
def snake_case__ ( self : Tuple ) -> str:
"""simple docstring"""
if os.path.isdir(self._tfds_path ):
__lowercase = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
__lowercase = os.path.dirname(self._tfds_path )
else:
raise ValueError("""--tfds_path is neither a directory nor a file. Please check path.""" )
__lowercase = os.path.abspath(self._datasets_directory )
self._logger.info(F"Converting datasets from {abs_tfds_path} to {abs_datasets_path}" )
__lowercase = []
__lowercase = []
__lowercase = {}
if os.path.isdir(self._tfds_path ):
__lowercase = os.listdir(lowercase )
else:
__lowercase = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F"Looking at file {f_name}" )
__lowercase = os.path.join(lowercase , lowercase )
__lowercase = os.path.join(lowercase , lowercase )
if not os.path.isfile(lowercase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("""Skipping file""" )
continue
with open(lowercase , encoding="""utf-8""" ) as f:
__lowercase = f.readlines()
__lowercase = []
__lowercase = False
__lowercase = False
__lowercase = []
for line in lines:
__lowercase = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__lowercase = """import datasets\n"""
elif "import tensorflow" in out_line:
# order is important here
__lowercase = """"""
continue
elif "from absl import logging" in out_line:
__lowercase = """from datasets import logging\n"""
elif "getLogger" in out_line:
__lowercase = out_line.replace("""getLogger""" , """get_logger""" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
__lowercase = True
__lowercase = list(filter(lambda lowercase : e in out_line , lowercase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowercase ) + """\n""" )
out_lines.append(lowercase )
out_lines.append(lowercase )
continue
else:
for pattern, replacement in TO_CONVERT:
__lowercase = re.sub(lowercase , lowercase , lowercase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__lowercase = re.match(r"""from\stensorflow_datasets.*import\s([^\.\r\n]+)""" , lowercase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(""",""" ) )
__lowercase = """from . import """ + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F"Error converting {out_line.strip()}" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__lowercase = True
out_lines.append(lowercase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__lowercase = f_name.replace(""".py""" , """""" )
__lowercase = os.path.join(lowercase , lowercase )
__lowercase = os.path.join(lowercase , lowercase )
os.makedirs(lowercase , exist_ok=lowercase )
self._logger.info(F"Adding directory {output_dir}" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(lowercase )
if needs_manual_update:
with_manual_update.append(lowercase )
with open(lowercase , """w""" , encoding="""utf-8""" ) as f:
f.writelines(lowercase )
self._logger.info(F"Converted in {output_file}" )
for utils_file in utils_files:
try:
__lowercase = os.path.basename(lowercase )
__lowercase = imports_to_builder_map[f_name.replace(""".py""" , """""" )]
self._logger.info(F"Moving {dest_folder} to {utils_file}" )
shutil.copy(lowercase , lowercase )
except KeyError:
self._logger.error(F"Cannot find destination folder for {utils_file}. Please copy manually." )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'." )
| 706 |
def hamming_distance(string_a: str, string_b: str) -> int:
    """Return the number of positions at which the two strings differ.

    >>> hamming_distance("python", "pythno")
    2
    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string_a) != len(string_b):
        raise ValueError("String lengths must match!")
    count = 0
    for char_a, char_b in zip(string_a, string_b):
        if char_a != char_b:
            count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 634 | 0 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
"""simple docstring"""
lowercase__ : List[Any] = PegasusConfig
lowercase__ : Tuple = {}
lowercase__ : Any = """gelu"""
def __init__( self : Any , lowercase : List[str] , lowercase : List[Any]=13 , lowercase : Any=7 , lowercase : Union[str, Any]=True , lowercase : Any=False , lowercase : Optional[int]=99 , lowercase : int=32 , lowercase : str=5 , lowercase : Union[str, Any]=4 , lowercase : Optional[Any]=37 , lowercase : Optional[int]=0.1 , lowercase : int=0.1 , lowercase : Dict=20 , lowercase : str=2 , lowercase : Any=1 , lowercase : str=0 , ) -> Optional[int]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = eos_token_id
__lowercase = pad_token_id
__lowercase = bos_token_id
def snake_case__ ( self : int ) -> Optional[int]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
__lowercase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
__lowercase = np.concatenate([input_ids, eos_tensor] , axis=1 )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__lowercase = prepare_pegasus_inputs_dict(lowercase , lowercase , lowercase )
return config, inputs_dict
def snake_case__ ( self : str , lowercase : List[str] , lowercase : List[str] , lowercase : int ) -> int:
"""simple docstring"""
__lowercase = 20
__lowercase = model_class_name(lowercase )
__lowercase = model.encode(inputs_dict["""input_ids"""] )
__lowercase , __lowercase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__lowercase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase )
__lowercase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
__lowercase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__lowercase = model.decode(
decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , )
__lowercase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__lowercase = model.decode(
decoder_input_ids[:, -1:] , lowercase , decoder_attention_mask=lowercase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowercase , )
__lowercase = model.decode(lowercase , lowercase )
__lowercase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"Max diff is {diff}" )
def snake_case__ ( self : Optional[Any] , lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = 20
__lowercase = model_class_name(lowercase )
__lowercase = model.encode(inputs_dict["""input_ids"""] )
__lowercase , __lowercase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__lowercase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__lowercase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase )
__lowercase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__lowercase = model.decode(
decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , )
__lowercase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__lowercase = model.decode(
decoder_input_ids[:, -1:] , lowercase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowercase , decoder_position_ids=lowercase , )
__lowercase = model.decode(lowercase , lowercase , decoder_attention_mask=lowercase )
__lowercase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"Max diff is {diff}" )
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
"""simple docstring"""
lowercase__ : Union[str, Any] = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
lowercase__ : List[str] = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
lowercase__ : Union[str, Any] = True
lowercase__ : Union[str, Any] = False
lowercase__ : Optional[Any] = False
lowercase__ : Union[str, Any] = False
def snake_case__ ( self : str ) -> Any:
"""simple docstring"""
__lowercase = FlaxPegasusModelTester(self )
__lowercase = ConfigTester(self , config_class=lowercase )
def snake_case__ ( self : int ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case__ ( self : Any ) -> int:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowercase , lowercase , lowercase )
def snake_case__ ( self : List[str] ) -> Any:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowercase , lowercase , lowercase )
def snake_case__ ( self : str ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowercase = self._prepare_for_class(lowercase , lowercase )
__lowercase = model_class(lowercase )
@jax.jit
def encode_jitted(lowercase : int , lowercase : Dict=None , **lowercase : Any ):
return model.encode(input_ids=lowercase , attention_mask=lowercase )
with self.subTest("""JIT Enabled""" ):
__lowercase = encode_jitted(**lowercase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__lowercase = encode_jitted(**lowercase ).to_tuple()
self.assertEqual(len(lowercase ) , len(lowercase ) )
for jitted_output, output in zip(lowercase , lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case__ ( self : Dict ) -> str:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowercase = model_class(lowercase )
__lowercase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
__lowercase = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(lowercase : Tuple , lowercase : Tuple , lowercase : Optional[Any] ):
return model.decode(
decoder_input_ids=lowercase , decoder_attention_mask=lowercase , encoder_outputs=lowercase , )
with self.subTest("""JIT Enabled""" ):
__lowercase = decode_jitted(**lowercase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__lowercase = decode_jitted(**lowercase ).to_tuple()
self.assertEqual(len(lowercase ) , len(lowercase ) )
for jitted_output, output in zip(lowercase , lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
__lowercase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=lowercase )
__lowercase = np.ones((1, 1) )
__lowercase = model(lowercase )
self.assertIsNotNone(lowercase )
@slow
def snake_case__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
__lowercase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
__lowercase = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
__lowercase = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
__lowercase = tokenizer(lowercase , return_tensors="""np""" , truncation=lowercase , max_length=512 , padding=lowercase )
__lowercase = model.generate(**lowercase , num_beams=2 ).sequences
__lowercase = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
assert tgt_text == decoded
| 707 |
from __future__ import annotations
from collections.abc import Callable
UpperCamelCase__ = list[list[float | int]]
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> Matrix:
__lowercase = len(lowercase__ )
__lowercase = [[0 for _ in range(size + 1 )] for _ in range(lowercase__ )]
__lowercase = 42
__lowercase = 42
__lowercase = 42
__lowercase = 42
__lowercase = 42
__lowercase = 42
for row in range(lowercase__ ):
for col in range(lowercase__ ):
__lowercase = matrix[row][col]
__lowercase = vector[row][0]
__lowercase = 0
__lowercase = 0
while row < size and col < size:
# pivoting
__lowercase = max((abs(augmented[rowa][col] ), rowa) for rowa in range(lowercase__ , lowercase__ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
__lowercase , __lowercase = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , lowercase__ ):
__lowercase = augmented[rowa][col] / augmented[row][col]
__lowercase = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , lowercase__ ):
for row in range(lowercase__ ):
__lowercase = augmented[row][col] / augmented[col][col]
for cola in range(lowercase__ , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(lowercase__ )
]
def UpperCAmelCase__ ( lowercase__ ) -> Callable[[int], int]:
__lowercase = len(lowercase__ )
__lowercase = [[0 for _ in range(lowercase__ )] for _ in range(lowercase__ )]
__lowercase = [[0] for _ in range(lowercase__ )]
__lowercase = 42
__lowercase = 42
__lowercase = 42
__lowercase = 42
for x_val, y_val in enumerate(lowercase__ ):
for col in range(lowercase__ ):
__lowercase = (x_val + 1) ** (size - col - 1)
__lowercase = y_val
__lowercase = solve(lowercase__ , lowercase__ )
def interpolated_func(lowercase__ ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(lowercase__ ) )
return interpolated_func
def UpperCAmelCase__ ( lowercase__ ) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def UpperCAmelCase__ ( lowercase__ = question_function , lowercase__ = 10 ) -> int:
__lowercase = [func(lowercase__ ) for x_val in range(1 , order + 1 )]
__lowercase = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
__lowercase = 0
__lowercase = 42
__lowercase = 42
for poly in polynomials:
__lowercase = 1
while func(lowercase__ ) == poly(lowercase__ ):
x_val += 1
ret += poly(lowercase__ )
return ret
if __name__ == "__main__":
print(F"""{solution() = }""")
| 634 | 0 |
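The style_context sample in the row above is a mangled copy of a Project Euler 101 solution: interpolate polynomials through growing prefixes of u(n) = 1 - n + n^2 - ... + n^10 and sum the first term where each fit goes wrong. Below is a minimal readable sketch of the same idea, using exact Fractions and Lagrange interpolation instead of the sample's hand-rolled Gaussian elimination; the names and structure here are illustrative, not taken from the sample.

```python
# Illustrative sketch (standard library only) of the algorithm the sample above implements.
from fractions import Fraction


def u(n: int) -> int:
    # Generating function: 1 - n + n^2 - n^3 + ... + n^10
    return sum((-n) ** k for k in range(11))


def lagrange_eval(points: list[int], x: int) -> Fraction:
    # Value at x of the degree-(k-1) polynomial through (1, points[0]), ..., (k, points[k-1]).
    k = len(points)
    total = Fraction(0)
    for i in range(k):
        term = Fraction(points[i])
        for j in range(k):
            if i != j:
                term *= Fraction(x - (j + 1), (i + 1) - (j + 1))
        total += term
    return total


def solution(order: int = 10) -> int:
    values = [u(n) for n in range(1, order + 1)]
    total = 0
    for k in range(1, order + 1):  # optimum polynomials OP(1) ... OP(10)
        n = 1
        while lagrange_eval(values[:k], n) == u(n):
            n += 1  # walk past the correctly reproduced terms
        total += int(lagrange_eval(values[:k], n))  # first incorrect term (FIT)
    return total


if __name__ == "__main__":
    print(solution())  # 37076114526
```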
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCamelCase__ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
UpperCamelCase__ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
UpperCamelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : Optional[str] = field(
default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} )
lowercase__ : Optional[str] = field(
default=_UpperCAmelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
lowercase__ : Optional[str] = field(
default=_UpperCAmelCase , metadata={"""help""": """The column name of the images in the files. If not set, will try to use 'image' or 'img'."""} , )
lowercase__ : Optional[str] = field(default=_UpperCAmelCase , metadata={"""help""": """A folder containing the training data."""} )
lowercase__ : Optional[str] = field(default=_UpperCAmelCase , metadata={"""help""": """A folder containing the validation data."""} )
lowercase__ : Optional[float] = field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
lowercase__ : int = field(default=32 , metadata={"""help""": """The size of the square patches to use for masking."""} )
lowercase__ : float = field(
default=0.6 , metadata={"""help""": """Percentage of patches to mask."""} , )
lowercase__ : Optional[int] = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
lowercase__ : Optional[int] = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def snake_case__ ( self : Any ) -> int:
"""simple docstring"""
__lowercase = {}
if self.train_dir is not None:
__lowercase = self.train_dir
if self.validation_dir is not None:
__lowercase = self.validation_dir
__lowercase = data_files if data_files else None
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : str = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a """
"""checkpoint identifier on the hub. """
"""Don't set if you want to train a model from scratch."""
)
} , )
lowercase__ : Optional[str] = field(
default=_UpperCAmelCase , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(_UpperCAmelCase )} , )
lowercase__ : Optional[str] = field(
default=_UpperCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowercase__ : Optional[str] = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
lowercase__ : Optional[str] = field(
default=_UpperCAmelCase , metadata={"""help""": """Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"""} , )
lowercase__ : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
lowercase__ : str = field(default=_UpperCAmelCase , metadata={"""help""": """Name or path of preprocessor config."""} )
lowercase__ : bool = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
lowercase__ : Optional[int] = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""The size (resolution) of each image. If not specified, will use `image_size` of the configuration."""
)
} , )
lowercase__ : Optional[int] = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."""
)
} , )
lowercase__ : Optional[int] = field(
default=_UpperCAmelCase , metadata={"""help""": """Stride to use for the encoder."""} , )
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[str] , lowercase : Optional[Any]=192 , lowercase : int=32 , lowercase : Union[str, Any]=4 , lowercase : List[str]=0.6 ) -> Optional[int]:
"""simple docstring"""
__lowercase = input_size
__lowercase = mask_patch_size
__lowercase = model_patch_size
__lowercase = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError("""Input size must be divisible by mask patch size""" )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError("""Mask patch size must be divisible by model patch size""" )
__lowercase = self.input_size // self.mask_patch_size
__lowercase = self.mask_patch_size // self.model_patch_size
__lowercase = self.rand_size**2
__lowercase = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = np.random.permutation(self.token_count )[: self.mask_count]
__lowercase = np.zeros(self.token_count , dtype=lowercase )
__lowercase = 1
__lowercase = mask.reshape((self.rand_size, self.rand_size) )
__lowercase = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
return torch.tensor(mask.flatten() )
def UpperCAmelCase__ ( lowercase__ ) -> Optional[int]:
__lowercase = torch.stack([example["""pixel_values"""] for example in examples] )
__lowercase = torch.stack([example["""mask"""] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def UpperCAmelCase__ ( ) -> Any:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowercase , __lowercase , __lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mim""" , lowercase__ , lowercase__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__lowercase = training_args.get_process_log_level()
logger.setLevel(lowercase__ )
transformers.utils.logging.set_verbosity(lowercase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
__lowercase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowercase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
__lowercase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
__lowercase = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowercase__ ) and data_args.train_val_split > 0.0:
__lowercase = ds["""train"""].train_test_split(data_args.train_val_split )
__lowercase = split["""train"""]
__lowercase = split["""test"""]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowercase = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
__lowercase = AutoConfig.from_pretrained(model_args.config_name_or_path , **lowercase__ )
elif model_args.model_name_or_path:
__lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowercase__ )
else:
__lowercase = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(F"New config: {config}" )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(lowercase__ , """decoder_type""" ):
__lowercase = """simmim"""
# adapt config
__lowercase = model_args.image_size if model_args.image_size is not None else config.image_size
__lowercase = model_args.patch_size if model_args.patch_size is not None else config.patch_size
__lowercase = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
"""image_size""": model_args.image_size,
"""patch_size""": model_args.patch_size,
"""encoder_stride""": model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
__lowercase = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **lowercase__ )
elif model_args.model_name_or_path:
__lowercase = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **lowercase__ )
else:
__lowercase = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
__lowercase = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
__lowercase = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
__lowercase = AutoModelForMaskedImageModeling.from_config(lowercase__ )
if training_args.do_train:
__lowercase = ds["""train"""].column_names
else:
__lowercase = ds["""validation"""].column_names
if data_args.image_column_name is not None:
__lowercase = data_args.image_column_name
elif "image" in column_names:
__lowercase = """image"""
elif "img" in column_names:
__lowercase = """img"""
else:
__lowercase = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
__lowercase = Compose(
[
Lambda(lambda img : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
__lowercase = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(lowercase__ ):
__lowercase = [transforms(lowercase__ ) for image in examples[image_column_name]]
__lowercase = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
__lowercase = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(lowercase__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
__lowercase = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(lowercase__ )
# Initialize our trainer
__lowercase = Trainer(
model=lowercase__ , args=lowercase__ , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=lowercase__ , data_collator=lowercase__ , )
# Training
if training_args.do_train:
__lowercase = None
if training_args.resume_from_checkpoint is not None:
__lowercase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__lowercase = last_checkpoint
__lowercase = trainer.train(resume_from_checkpoint=lowercase__ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__lowercase = trainer.evaluate()
trainer.log_metrics("""eval""" , lowercase__ )
trainer.save_metrics("""eval""" , lowercase__ )
# Write model card and (optionally) push to hub
__lowercase = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """masked-image-modeling""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-image-modeling"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase__ )
else:
trainer.create_model_card(**lowercase__ )
if __name__ == "__main__":
main()
| 708 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase__ = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 634 | 0 |
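The `MaskGenerator` class inside the run_mim-style training script in the row above produces the random boolean patch mask that is fed to the model as `bool_masked_pos` (SimMIM-style masked image modeling). Here is a standalone sketch of just that masking step; the default sizes mirror the sample, everything else (names, the free function form) is illustrative.

```python
# Illustrative sketch of SimMIM-style random patch masking (numpy only).
import numpy as np


def random_patch_mask(input_size: int = 192, mask_patch_size: int = 32,
                      model_patch_size: int = 4, mask_ratio: float = 0.6) -> np.ndarray:
    # The image is split into (input_size / mask_patch_size)^2 coarse patches;
    # a mask_ratio fraction of them is masked, then the mask is upsampled to the
    # model's finer patch grid so it can be used as `bool_masked_pos`.
    assert input_size % mask_patch_size == 0
    assert mask_patch_size % model_patch_size == 0
    rand_size = input_size // mask_patch_size       # coarse grid side, e.g. 6
    scale = mask_patch_size // model_patch_size     # upsampling factor, e.g. 8
    token_count = rand_size ** 2
    mask_count = int(np.ceil(token_count * mask_ratio))

    mask = np.zeros(token_count, dtype=int)
    mask[np.random.permutation(token_count)[:mask_count]] = 1
    mask = mask.reshape(rand_size, rand_size)
    mask = mask.repeat(scale, axis=0).repeat(scale, axis=1)  # expand to model patch grid
    return mask.flatten()


mask = random_patch_mask()
print(mask.shape, mask.mean())  # (2304,) and roughly the mask_ratio
```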
import math
def UpperCAmelCase__ ( lowercase__ ) -> bool:
__lowercase = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(lowercase__ )
def UpperCAmelCase__ ( lowercase__ = 1 / 12_345 ) -> int:
__lowercase = 0
__lowercase = 0
__lowercase = 3
while True:
__lowercase = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(lowercase__ ):
__lowercase = int(lowercase__ )
total_partitions += 1
if check_partition_perfect(lowercase__ ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(lowercase__ )
integer += 1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 709 |
import unittest
import numpy as np
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , ) -> np.ndarray:
__lowercase = np.shape(lowercase__ )
__lowercase = np.shape(lowercase__ )
__lowercase = np.shape(lowercase__ )
if shape_a[0] != shape_b[0]:
__lowercase = (
"""Expected the same number of rows for A and B. """
F"Instead found A of size {shape_a} and B of size {shape_b}"
)
raise ValueError(lowercase__ )
if shape_b[1] != shape_c[1]:
__lowercase = (
"""Expected the same number of columns for B and C. """
F"Instead found B of size {shape_b} and C of size {shape_c}"
)
raise ValueError(lowercase__ )
__lowercase = pseudo_inv
if a_inv is None:
try:
__lowercase = np.linalg.inv(lowercase__ )
except np.linalg.LinAlgError:
raise ValueError(
"""Input matrix A is not invertible. Cannot compute Schur complement.""" )
return mat_c - mat_b.T @ a_inv @ mat_b
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : Dict ) -> None:
"""simple docstring"""
__lowercase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
__lowercase = np.array([[0, 3], [3, 0], [2, 3]] )
__lowercase = np.array([[2, 1], [6, 3]] )
__lowercase = schur_complement(lowercase , lowercase , lowercase )
__lowercase = np.block([[a, b], [b.T, c]] )
__lowercase = np.linalg.det(lowercase )
__lowercase = np.linalg.det(lowercase )
__lowercase = np.linalg.det(lowercase )
self.assertAlmostEqual(lowercase , det_a * det_s )
def snake_case__ ( self : Tuple ) -> None:
"""simple docstring"""
__lowercase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
__lowercase = np.array([[0, 3], [3, 0], [2, 3]] )
__lowercase = np.array([[2, 1], [6, 3]] )
with self.assertRaises(lowercase ):
schur_complement(lowercase , lowercase , lowercase )
def snake_case__ ( self : Tuple ) -> None:
"""simple docstring"""
__lowercase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
__lowercase = np.array([[0, 3], [3, 0], [2, 3]] )
__lowercase = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(lowercase ):
schur_complement(lowercase , lowercase , lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 634 | 0 |
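The style_context sample in the row above computes the Schur complement S = C - Bᵀ A⁻¹ B, and its tests rely on the block-determinant identity det([[A, B], [Bᵀ, C]]) = det(A) · det(S). The following is a short numeric check of that identity with the same matrices the test uses; it is a sketch assuming only numpy, not part of the sample.

```python
# Illustrative check of the Schur-complement determinant identity (numpy only).
import numpy as np

a = np.array([[1.0, 2, 1], [2, 1, 2], [3, 2, 4]])
b = np.array([[0.0, 3], [3, 0], [2, 3]])
c = np.array([[2.0, 1], [6, 3]])

s = c - b.T @ np.linalg.inv(a) @ b   # Schur complement of A in the block matrix
m = np.block([[a, b], [b.T, c]])     # full symmetric block matrix

assert np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))
print(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))
```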
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : Union[str, Any] = GPTaTokenizer
lowercase__ : str = GPTaTokenizerFast
lowercase__ : List[Any] = True
lowercase__ : List[str] = {"""add_prefix_space""": True}
lowercase__ : Optional[Any] = False
def snake_case__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowercase = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
__lowercase = dict(zip(lowercase , range(len(lowercase ) ) ) )
__lowercase = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__lowercase = {"""unk_token""": """<unk>"""}
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowercase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowercase ) )
def snake_case__ ( self : Union[str, Any] , **lowercase : Tuple ) -> Optional[Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **lowercase )
def snake_case__ ( self : Dict , **lowercase : Optional[int] ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **lowercase )
def snake_case__ ( self : List[str] , lowercase : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = """lower newer"""
__lowercase = """lower newer"""
return input_text, output_text
def snake_case__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowercase = """lower newer"""
__lowercase = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
__lowercase = tokenizer.tokenize(lowercase , add_prefix_space=lowercase )
self.assertListEqual(lowercase , lowercase )
__lowercase = tokens + [tokenizer.unk_token]
__lowercase = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , lowercase )
def snake_case__ ( self : Any ) -> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__lowercase = self.get_tokenizer()
__lowercase = self.get_rust_tokenizer(add_prefix_space=lowercase )
__lowercase = """lower newer"""
# Testing tokenization
__lowercase = tokenizer.tokenize(lowercase , add_prefix_space=lowercase )
__lowercase = rust_tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
# Testing conversion to ids without special tokens
__lowercase = tokenizer.encode(lowercase , add_special_tokens=lowercase , add_prefix_space=lowercase )
__lowercase = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
# Testing conversion to ids with special tokens
__lowercase = self.get_rust_tokenizer(add_prefix_space=lowercase )
__lowercase = tokenizer.encode(lowercase , add_prefix_space=lowercase )
__lowercase = rust_tokenizer.encode(lowercase )
self.assertListEqual(lowercase , lowercase )
# Testing the unknown token
__lowercase = tokens + [rust_tokenizer.unk_token]
__lowercase = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowercase ) , lowercase )
def snake_case__ ( self : List[str] , *lowercase : Tuple , **lowercase : int ) -> List[str]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple , lowercase : Optional[Any]=15 ) -> Union[str, Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
# Simple input
__lowercase = """This is a simple input"""
__lowercase = ["""This is a simple input 1""", """This is a simple input 2"""]
__lowercase = ("""This is a simple input""", """This is a pair""")
__lowercase = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(lowercase , tokenizer_r.encode , lowercase , max_length=lowercase , padding="""max_length""" )
# Simple input
self.assertRaises(lowercase , tokenizer_r.encode_plus , lowercase , max_length=lowercase , padding="""max_length""" )
# Simple input
self.assertRaises(
lowercase , tokenizer_r.batch_encode_plus , lowercase , max_length=lowercase , padding="""max_length""" , )
# Pair input
self.assertRaises(lowercase , tokenizer_r.encode , lowercase , max_length=lowercase , padding="""max_length""" )
# Pair input
self.assertRaises(lowercase , tokenizer_r.encode_plus , lowercase , max_length=lowercase , padding="""max_length""" )
# Pair input
self.assertRaises(
lowercase , tokenizer_r.batch_encode_plus , lowercase , max_length=lowercase , padding="""max_length""" , )
def snake_case__ ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
__lowercase = """This is a simple input"""
__lowercase = ["""This is a simple input looooooooong""", """This is a simple input"""]
__lowercase = ("""This is a simple input""", """This is a pair""")
__lowercase = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
__lowercase = tokenizer.pad_token_id
__lowercase = tokenizer(lowercase , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
__lowercase = tokenizer(lowercase , padding=lowercase , truncate=lowercase , return_tensors="""np""" )
__lowercase = tokenizer(*lowercase , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
__lowercase = tokenizer(lowercase , padding=lowercase , truncate=lowercase , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def snake_case__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase = """$$$"""
__lowercase = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=lowercase , add_bos_token=lowercase )
__lowercase = """This is a simple input"""
__lowercase = ["""This is a simple input 1""", """This is a simple input 2"""]
__lowercase = tokenizer.bos_token_id
__lowercase = tokenizer(lowercase )
__lowercase = tokenizer(lowercase )
self.assertEqual(out_s.input_ids[0] , lowercase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__lowercase = tokenizer.decode(out_s.input_ids )
__lowercase = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , lowercase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def snake_case__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = [self.get_tokenizer(do_lower_case=lowercase , add_bos_token=lowercase )]
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
__lowercase = """Encode this."""
__lowercase = """This one too please."""
__lowercase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
encoded_sequence += tokenizer.encode(lowercase , add_special_tokens=lowercase )
__lowercase = tokenizer.encode_plus(
lowercase , lowercase , add_special_tokens=lowercase , return_special_tokens_mask=lowercase , )
__lowercase = encoded_sequence_dict["""input_ids"""]
__lowercase = encoded_sequence_dict["""special_tokens_mask"""]
self.assertEqual(len(lowercase ) , len(lowercase ) )
__lowercase = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(lowercase )
]
__lowercase = [x for x in filtered_sequence if x is not None]
self.assertEqual(lowercase , lowercase )
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=lowercase )
__lowercase = """A photo of a cat"""
__lowercase = tokenizer.encode(
lowercase , )
self.assertEqual(lowercase , [2, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained("""test_opt""" )
__lowercase = AutoTokenizer.from_pretrained("""./test_opt""" )
__lowercase = tokenizer.encode(
lowercase , )
self.assertEqual(lowercase , [2, 250, 1_345, 9, 10, 4_758] )
def snake_case__ ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , use_slow=lowercase )
__lowercase = """A photo of a cat"""
__lowercase = tokenizer.encode(
lowercase , )
# Same as above
self.assertEqual(lowercase , [2, 250, 1_345, 9, 10, 4_758] )
@unittest.skip("""This test is failing because of a bug in the fast tokenizer""" )
def snake_case__ ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=lowercase )
__lowercase = """bos"""
__lowercase = tokenizer.get_vocab()["""bos"""]
__lowercase = """A photo of a cat"""
__lowercase = tokenizer.encode(
lowercase , )
# We changed the bos token
self.assertEqual(lowercase , [31_957, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained("""./tok""" )
__lowercase = AutoTokenizer.from_pretrained("""./tok""" )
self.assertTrue(tokenizer.is_fast )
__lowercase = tokenizer.encode(
lowercase , )
self.assertEqual(lowercase , [31_957, 250, 1_345, 9, 10, 4_758] )
| 710 |
import random
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ = False ) -> dict:
__lowercase = {i: [] for i in range(lowercase__ )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(lowercase__ )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
# for each couple of nodes, add an edge from u to v
# if the number randomly generated is greater than probability probability
for i in range(lowercase__ ):
for j in range(i + 1 , lowercase__ ):
if random.random() < probability:
graph[i].append(lowercase__ )
if not directed:
# if the graph is undirected, add an edge in from j to i, either
graph[j].append(lowercase__ )
return graph
def UpperCAmelCase__ ( lowercase__ ) -> dict:
return {
i: [j for j in range(lowercase__ ) if i != j] for i in range(lowercase__ )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 634 | 0 |
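The style_context sample in the row above builds a G(n, p)-style random graph as an adjacency list, returning a complete graph when p >= 1 and an empty one when p <= 0. A compact, runnable sketch of the same generator follows; the function and variable names are illustrative.

```python
# Illustrative G(n, p) random-graph sketch matching the sample's adjacency-list format.
import random


def random_graph(num_nodes: int, probability: float, directed: bool = False) -> dict:
    graph = {i: [] for i in range(num_nodes)}
    if probability >= 1:   # complete graph
        return {i: [j for j in range(num_nodes) if j != i] for i in range(num_nodes)}
    if probability <= 0:   # graph with no edges
        return graph
    for i in range(num_nodes):
        for j in range(i + 1, num_nodes):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:   # undirected: add the reverse edge too
                    graph[j].append(i)
    return graph


random.seed(1)
print(random_graph(4, 0.5))  # adjacency list; edges depend on the seed
```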
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Dict , lowercase : Tuple , ) -> Optional[int]:
"""simple docstring"""
__lowercase = parent
__lowercase = 13
__lowercase = 7
__lowercase = True
__lowercase = True
__lowercase = True
__lowercase = True
__lowercase = True
__lowercase = False
__lowercase = False
__lowercase = False
__lowercase = 2
__lowercase = 99
__lowercase = 0
__lowercase = 32
__lowercase = 2
__lowercase = 4
__lowercase = 0.1
__lowercase = 0.1
__lowercase = 512
__lowercase = 16
__lowercase = 2
__lowercase = 0.02
__lowercase = 3
__lowercase = 4
__lowercase = """last"""
__lowercase = True
__lowercase = None
__lowercase = 0
def snake_case__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
__lowercase = None
if self.use_input_lengths:
__lowercase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def snake_case__ ( self : Dict , lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : List[str] , lowercase : int , lowercase : List[Any] , lowercase : List[str] , lowercase : Optional[int] , lowercase : Tuple , ) -> Dict:
"""simple docstring"""
__lowercase = TFFlaubertModel(config=lowercase )
__lowercase = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
__lowercase = model(lowercase )
__lowercase = [input_ids, input_mask]
__lowercase = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self : Any , lowercase : Union[str, Any] , lowercase : List[str] , lowercase : List[str] , lowercase : Dict , lowercase : Optional[Any] , lowercase : Any , lowercase : str , lowercase : List[str] , lowercase : str , ) -> Dict:
"""simple docstring"""
__lowercase = TFFlaubertWithLMHeadModel(lowercase )
__lowercase = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
__lowercase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self : List[Any] , lowercase : List[str] , lowercase : Optional[int] , lowercase : List[Any] , lowercase : str , lowercase : List[Any] , lowercase : Union[str, Any] , lowercase : Dict , lowercase : int , lowercase : List[Any] , ) -> Dict:
"""simple docstring"""
__lowercase = TFFlaubertForQuestionAnsweringSimple(lowercase )
__lowercase = {"""input_ids""": input_ids, """lengths""": input_lengths}
__lowercase = model(lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__ ( self : int , lowercase : Any , lowercase : int , lowercase : Union[str, Any] , lowercase : Union[str, Any] , lowercase : Optional[int] , lowercase : List[str] , lowercase : Tuple , lowercase : str , lowercase : List[str] , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = TFFlaubertForSequenceClassification(lowercase )
__lowercase = {"""input_ids""": input_ids, """lengths""": input_lengths}
__lowercase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def snake_case__ ( self : Any , lowercase : Dict , lowercase : Union[str, Any] , lowercase : str , lowercase : List[str] , lowercase : Any , lowercase : Any , lowercase : int , lowercase : str , lowercase : Optional[Any] , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = TFFlaubertForTokenClassification(config=lowercase )
__lowercase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__lowercase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self : Union[str, Any] , lowercase : Optional[Any] , lowercase : str , lowercase : Dict , lowercase : str , lowercase : int , lowercase : int , lowercase : Any , lowercase : Tuple , lowercase : List[str] , ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.num_choices
__lowercase = TFFlaubertForMultipleChoice(config=lowercase )
__lowercase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
__lowercase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
__lowercase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
__lowercase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
__lowercase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = config_and_inputs
__lowercase = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""langs""": token_type_ids,
"""lengths""": input_lengths,
}
return config, inputs_dict
@require_tf
class _lowerCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : Dict = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowercase__ : List[str] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowercase__ : Any = (
{
"""feature-extraction""": TFFlaubertModel,
"""fill-mask""": TFFlaubertWithLMHeadModel,
"""question-answering""": TFFlaubertForQuestionAnsweringSimple,
"""text-classification""": TFFlaubertForSequenceClassification,
"""token-classification""": TFFlaubertForTokenClassification,
"""zero-shot""": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase__ : str = False
lowercase__ : Dict = False
def snake_case__ ( self : List[Any] , lowercase : Any , lowercase : Dict , lowercase : Dict , lowercase : Tuple , lowercase : List[str] ) -> Union[str, Any]:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = TFFlaubertModelTester(self )
__lowercase = ConfigTester(self , config_class=lowercase , emb_dim=37 )
def snake_case__ ( self : Optional[int] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case__ ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowercase )
def snake_case__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowercase )
def snake_case__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowercase )
def snake_case__ ( self : int ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowercase )
def snake_case__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*lowercase )
def snake_case__ ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*lowercase )
@slow
def snake_case__ ( self : str ) -> Tuple:
"""simple docstring"""
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = TFFlaubertModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_tf
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case__ ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" )
__lowercase = tf.convert_to_tensor(
[[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
__lowercase = model(lowercase )[0]
__lowercase = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , lowercase )
# compare the actual values for a slice.
__lowercase = tf.convert_to_tensor(
[
[
[-1.876_8773, -1.56_6555, 0.2707_2418],
[-1.692_0038, -0.587_3505, 1.932_9599],
[-2.956_3985, -1.699_3835, 1.797_2052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 711 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
UpperCamelCase__ = random.Random()
def UpperCAmelCase__ ( lowercase__ , lowercase__=1.0 , lowercase__=None , lowercase__=None ) -> str:
if rng is None:
__lowercase = global_rng
__lowercase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[Any] , lowercase : Tuple , lowercase : Union[str, Any]=7 , lowercase : List[Any]=400 , lowercase : Any=2_000 , lowercase : Optional[int]=24 , lowercase : Any=24 , lowercase : List[str]=0.0 , lowercase : Dict=16_000 , lowercase : Union[str, Any]=True , lowercase : Dict=True , ) -> Any:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = min_seq_length
__lowercase = max_seq_length
__lowercase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__lowercase = feature_size
__lowercase = num_mel_bins
__lowercase = padding_value
__lowercase = sampling_rate
__lowercase = return_attention_mask
__lowercase = do_normalize
def snake_case__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case__ ( self : List[str] , lowercase : Tuple=False , lowercase : int=False ) -> Optional[Any]:
"""simple docstring"""
def _flatten(lowercase : Optional[Any] ):
return list(itertools.chain(*lowercase ) )
if equal_length:
__lowercase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__lowercase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__lowercase = [np.asarray(lowercase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _lowerCAmelCase ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : int = SpeechaTextFeatureExtractor if is_speech_available() else None
def snake_case__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase = SpeechaTextFeatureExtractionTester(self )
def snake_case__ ( self : Tuple , lowercase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
self.assertTrue(np.all(np.mean(lowercase , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowercase , axis=0 ) - 1 ) < 1E-3 ) )
def snake_case__ ( self : List[Any] ) -> str:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = [np.asarray(lowercase ) for speech_input in speech_inputs]
# Test feature size
__lowercase = feature_extractor(lowercase , padding=lowercase , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
__lowercase = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
__lowercase = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-3 ) )
# Test batched
__lowercase = feature_extractor(lowercase , return_tensors="""np""" ).input_features
__lowercase = feature_extractor(lowercase , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(lowercase , lowercase ):
self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__lowercase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__lowercase = np.asarray(lowercase )
__lowercase = feature_extractor(lowercase , return_tensors="""np""" ).input_features
__lowercase = feature_extractor(lowercase , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(lowercase , lowercase ):
self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-3 ) )
def snake_case__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = ["""longest""", """max_length""", """do_not_pad"""]
__lowercase = [None, 16, None]
for max_length, padding in zip(lowercase , lowercase ):
__lowercase = feature_extractor(
lowercase , padding=lowercase , max_length=lowercase , return_attention_mask=lowercase )
__lowercase = inputs.input_features
__lowercase = inputs.attention_mask
__lowercase = [np.sum(lowercase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def snake_case__ ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = ["""longest""", """max_length""", """do_not_pad"""]
__lowercase = [None, 16, None]
for max_length, padding in zip(lowercase , lowercase ):
__lowercase = feature_extractor(
lowercase , max_length=lowercase , padding=lowercase , return_tensors="""np""" , return_attention_mask=lowercase )
__lowercase = inputs.input_features
__lowercase = inputs.attention_mask
__lowercase = [np.sum(lowercase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def snake_case__ ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = feature_extractor(
lowercase , padding="""max_length""" , max_length=4 , truncation=lowercase , return_tensors="""np""" , return_attention_mask=lowercase , )
__lowercase = inputs.input_features
__lowercase = inputs.attention_mask
__lowercase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = feature_extractor(
lowercase , padding="""longest""" , max_length=4 , truncation=lowercase , return_tensors="""np""" , return_attention_mask=lowercase , )
__lowercase = inputs.input_features
__lowercase = inputs.attention_mask
__lowercase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = feature_extractor(
lowercase , padding="""longest""" , max_length=16 , truncation=lowercase , return_tensors="""np""" , return_attention_mask=lowercase , )
__lowercase = inputs.input_features
__lowercase = inputs.attention_mask
__lowercase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
def snake_case__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
import torch
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = np.random.rand(100 , 32 ).astype(np.floataa )
__lowercase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__lowercase = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__lowercase = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def snake_case__ ( self : Optional[int] , lowercase : Union[str, Any] ) -> int:
"""simple docstring"""
from datasets import load_dataset
__lowercase = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
__lowercase = ds.sort("""id""" ).select(range(lowercase ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def snake_case__ ( self : str ) -> Any:
"""simple docstring"""
# fmt: off
__lowercase = np.array([
-1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
-1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
-1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
] )
# fmt: on
__lowercase = self._load_datasamples(1 )
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = feature_extractor(lowercase , return_tensors="""pt""" ).input_features
self.assertEquals(input_features.shape , (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] , lowercase , atol=1E-4 ) )
| 634 | 0 |
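The feature-extractor tests in the row above repeatedly call a zero-mean / unit-variance check on the extracted features, i.e. they assert per-utterance mean-variance normalisation. Below is a minimal sketch of that normalisation and the corresponding check; it illustrates what the assertion verifies (numpy only) and is not the extractor's actual code path.

```python
# Illustrative per-utterance mean/variance normalisation and the check used in the tests.
import numpy as np


def normalize(features: np.ndarray) -> np.ndarray:
    # features: (num_frames, feature_size); normalise each feature dimension over time.
    return (features - features.mean(axis=0)) / (features.std(axis=0) + 1e-10)


feats = np.random.rand(584, 24).astype(np.float32)
normed = normalize(feats)
assert np.all(np.abs(normed.mean(axis=0)) < 1e-3)
assert np.all(np.abs(normed.var(axis=0) - 1) < 1e-3)
```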
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Any = ["""image_processor""", """tokenizer"""]
lowercase__ : int = """BlipImageProcessor"""
lowercase__ : Optional[Any] = """AutoTokenizer"""
def __init__( self : Tuple , lowercase : Any , lowercase : Any ) -> Any:
"""simple docstring"""
__lowercase = False
super().__init__(lowercase , lowercase )
__lowercase = self.image_processor
def __call__( self : int , lowercase : ImageInput = None , lowercase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowercase : bool = True , lowercase : Union[bool, str, PaddingStrategy] = False , lowercase : Union[bool, str, TruncationStrategy] = None , lowercase : Optional[int] = None , lowercase : int = 0 , lowercase : Optional[int] = None , lowercase : Optional[bool] = None , lowercase : bool = False , lowercase : bool = False , lowercase : bool = False , lowercase : bool = False , lowercase : bool = False , lowercase : bool = True , lowercase : Optional[Union[str, TensorType]] = None , **lowercase : Dict , ) -> BatchEncoding:
"""simple docstring"""
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
__lowercase = self.tokenizer
__lowercase = self.tokenizer(
text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
return text_encoding
# add pixel_values
__lowercase = self.image_processor(lowercase , return_tensors=lowercase )
if text is not None:
__lowercase = self.tokenizer(
text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
else:
__lowercase = None
if text_encoding is not None:
encoding_image_processor.update(lowercase )
return encoding_image_processor
def snake_case__ ( self : List[Any] , *lowercase : Optional[int] , **lowercase : List[Any] ) -> str:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def snake_case__ ( self : Optional[int] , *lowercase : List[Any] , **lowercase : str ) -> str:
"""simple docstring"""
return self.tokenizer.decode(*lowercase , **lowercase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def snake_case__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase = self.tokenizer.model_input_names
__lowercase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 712 |
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ ) -> float:
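# closed-form sum of an arithmetic series: S_n = (n / 2) * (2 * a_1 + (n - 1) * d)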
__lowercase = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def UpperCAmelCase__ ( ) -> List[str]:
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 634 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase__ = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
UpperCamelCase__ = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase__ = {F"""funnel-transformer/{name}""": 5_12 for name in _model_names}
UpperCamelCase__ = {F"""funnel-transformer/{name}""": {"do_lower_case": True} for name in _model_names}
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Optional[int] = VOCAB_FILES_NAMES
lowercase__ : int = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : List[Any] = PRETRAINED_INIT_CONFIGURATION
lowercase__ : Tuple = FunnelTokenizer
lowercase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : int = 2
def __init__( self : List[str] , lowercase : Optional[int]=None , lowercase : str=None , lowercase : Any=True , lowercase : Dict="<unk>" , lowercase : Optional[Any]="<sep>" , lowercase : Tuple="<pad>" , lowercase : Dict="<cls>" , lowercase : int="<mask>" , lowercase : int="<s>" , lowercase : str="</s>" , lowercase : Tuple=True , lowercase : Union[str, Any]=True , lowercase : List[Any]=None , lowercase : Optional[int]="##" , **lowercase : str , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(
lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , bos_token=lowercase , eos_token=lowercase , clean_text=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , wordpieces_prefix=lowercase , **lowercase , )
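# check whether the serialized normalizer still matches the requested lowercasing, accent stripping and Chinese-character handling; rebuild it if any option changed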
__lowercase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , lowercase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowercase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowercase ) != tokenize_chinese_chars
):
__lowercase = getattr(lowercase , normalizer_state.pop("""type""" ) )
__lowercase = do_lower_case
__lowercase = strip_accents
__lowercase = tokenize_chinese_chars
__lowercase = normalizer_class(**lowercase )
__lowercase = do_lower_case
def snake_case__ ( self : int , lowercase : Any , lowercase : Dict=None ) -> Optional[int]:
"""simple docstring"""
__lowercase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case__ ( self : int , lowercase : List[int] , lowercase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case__ ( self : List[str] , lowercase : str , lowercase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
__lowercase = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase )
| 713 |
def UpperCAmelCase__ ( lowercase__ = 100 ) -> int:
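# difference between the square of the sum and the sum of the squares of the first n natural numbers, using the closed-form expression for each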
__lowercase = n * (n + 1) * (2 * n + 1) / 6
__lowercase = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 634 | 0 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 714 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
UpperCamelCase__ = datasets.logging.get_logger(__name__)
UpperCamelCase__ = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
UpperCamelCase__ = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
UpperCamelCase__ = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__=False , lowercase__=False , lowercase__=True , lowercase__=False , lowercase__="dummy_doc" ) -> str:
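# wrap the key and system lines as single-document dicts so the coval reader can process them document by document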
__lowercase = {doc: key_lines}
__lowercase = {doc: sys_lines}
__lowercase = {}
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase , __lowercase = reader.get_doc_mentions(lowercase__ , key_doc_lines[doc] , lowercase__ )
key_singletons_num += singletons_num
if NP_only or min_span:
__lowercase = reader.set_annotated_parse_trees(lowercase__ , key_doc_lines[doc] , lowercase__ , lowercase__ )
__lowercase , __lowercase = reader.get_doc_mentions(lowercase__ , sys_doc_lines[doc] , lowercase__ )
sys_singletons_num += singletons_num
if NP_only or min_span:
__lowercase = reader.set_annotated_parse_trees(lowercase__ , key_doc_lines[doc] , lowercase__ , lowercase__ )
if remove_nested:
__lowercase , __lowercase = reader.remove_nested_coref_mentions(lowercase__ , lowercase__ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
__lowercase , __lowercase = reader.remove_nested_coref_mentions(lowercase__ , lowercase__ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
__lowercase = reader.get_mention_assignments(lowercase__ , lowercase__ )
__lowercase = reader.get_mention_assignments(lowercase__ , lowercase__ )
__lowercase = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
logger.info(
"""Number of resulting singleton clusters in the key """
F"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
if not keep_singletons:
logger.info(
F"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
"""files, respectively""" )
return doc_coref_infos
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Optional[Any]:
__lowercase = get_coref_infos(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
__lowercase = {}
__lowercase = 0
__lowercase = 0
for name, metric in metrics:
__lowercase , __lowercase , __lowercase = evaluator.evaluate_documents(lowercase__ , lowercase__ , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F"{name}/recall": recall, F"{name}/precision": precision, F"{name}/f1": fa} )
logger.info(
name.ljust(10 ) , F"Recall: {recall * 100:.2f}" , F" Precision: {precision * 100:.2f}" , F" F1: {fa * 100:.2f}" , )
if conll_subparts_num == 3:
__lowercase = (conll / 3) * 100
logger.info(F"CoNLL score: {conll:.2f}" )
output_scores.update({"""conll_score""": conll} )
return output_scores
def UpperCAmelCase__ ( lowercase__ ) -> List[Any]:
__lowercase = False
for line in key_lines:
if not line.startswith("""#""" ):
if len(line.split() ) > 6:
__lowercase = line.split()[5]
if not parse_col == "-":
__lowercase = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def snake_case__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def snake_case__ ( self : Tuple , lowercase : Dict , lowercase : Optional[int] , lowercase : Dict=True , lowercase : List[str]=False , lowercase : int=False , lowercase : Dict=False ) -> str:
"""simple docstring"""
__lowercase = [
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
__lowercase = util.check_gold_parse_annotation(lowercase )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
__lowercase = evaluate(
key_lines=lowercase , sys_lines=lowercase , metrics=lowercase , NP_only=lowercase , remove_nested=lowercase , keep_singletons=lowercase , min_span=lowercase , )
return score
| 634 | 0 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Union[str, Any] = (EulerDiscreteScheduler,)
lowercase__ : Any = 10
def snake_case__ ( self : Union[str, Any] , **lowercase : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = {
"""num_train_timesteps""": 1_100,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**lowercase )
return config
def snake_case__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=lowercase )
def snake_case__ ( self : str ) -> int:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowercase , beta_end=lowercase )
def snake_case__ ( self : Any ) -> List[Any]:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowercase )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase )
def snake_case__ ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowercase )
scheduler.set_timesteps(self.num_inference_steps )
__lowercase = torch.manual_seed(0 )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowercase = sample.to(lowercase )
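# run the denoising loop: scale the model input for the current timestep, call the model, then advance the sample with scheduler.step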
for i, t in enumerate(scheduler.timesteps ):
__lowercase = scheduler.scale_model_input(lowercase , lowercase )
__lowercase = model(lowercase , lowercase )
__lowercase = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase )
__lowercase = output.prev_sample
__lowercase = torch.sum(torch.abs(lowercase ) )
__lowercase = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(prediction_type="""v_prediction""" )
__lowercase = scheduler_class(**lowercase )
scheduler.set_timesteps(self.num_inference_steps )
__lowercase = torch.manual_seed(0 )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowercase = sample.to(lowercase )
for i, t in enumerate(scheduler.timesteps ):
__lowercase = scheduler.scale_model_input(lowercase , lowercase )
__lowercase = model(lowercase , lowercase )
__lowercase = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase )
__lowercase = output.prev_sample
__lowercase = torch.sum(torch.abs(lowercase ) )
__lowercase = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 0.0002 ) < 1E-2
assert abs(result_mean.item() - 2.2_6_7_6E-0_6 ) < 1E-3
def snake_case__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowercase )
scheduler.set_timesteps(self.num_inference_steps , device=lowercase )
__lowercase = torch.manual_seed(0 )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
__lowercase = sample.to(lowercase )
for t in scheduler.timesteps:
__lowercase = scheduler.scale_model_input(lowercase , lowercase )
__lowercase = model(lowercase , lowercase )
__lowercase = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase )
__lowercase = output.prev_sample
__lowercase = torch.sum(torch.abs(lowercase ) )
__lowercase = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def snake_case__ ( self : int ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowercase , use_karras_sigmas=lowercase )
scheduler.set_timesteps(self.num_inference_steps , device=lowercase )
__lowercase = torch.manual_seed(0 )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
__lowercase = sample.to(lowercase )
for t in scheduler.timesteps:
__lowercase = scheduler.scale_model_input(lowercase , lowercase )
__lowercase = model(lowercase , lowercase )
__lowercase = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase )
__lowercase = output.prev_sample
__lowercase = torch.sum(torch.abs(lowercase ) )
__lowercase = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1E-2
assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1E-3
| 715 |
UpperCamelCase__ = {
"joule": 1.0,
"kilojoule": 10_00,
"megajoule": 1_00_00_00,
"gigajoule": 10_00_00_00_00,
"wattsecond": 1.0,
"watthour": 36_00,
"kilowatthour": 3_60_00_00,
"newtonmeter": 1.0,
"calorie_nutr": 41_86.8,
"kilocalorie_nutr": 4_18_68_00.00,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 10_55.0_55_85,
"footpound": 1.355_818,
}
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ ) -> float:
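# convert by scaling the value to joules (the base unit) and then dividing by the factor of the target unit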
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
__lowercase = (
F"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
F"Valid values are: {', '.join(lowercase__ )}"
)
raise ValueError(lowercase__ )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 634 | 0 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def UpperCAmelCase__ ( lowercase__ = 3 ) -> qiskit.result.counts.Counts:
if isinstance(lowercase__ , str ):
raise TypeError("""number of qubits must be an integer.""" )
if number_of_qubits <= 0:
raise ValueError("""number of qubits must be > 0.""" )
if math.floor(lowercase__ ) != number_of_qubits:
raise ValueError("""number of qubits must be exact integer.""" )
if number_of_qubits > 10:
raise ValueError("""number of qubits too large to simulate(>10).""" )
__lowercase = QuantumRegister(lowercase__ , """qr""" )
__lowercase = ClassicalRegister(lowercase__ , """cr""" )
__lowercase = QuantumCircuit(lowercase__ , lowercase__ )
__lowercase = number_of_qubits
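# QFT core: apply a Hadamard to each qubit, followed by controlled phase rotations on the remaining qubits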
for i in range(lowercase__ ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(lowercase__ ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , lowercase__ , lowercase__ )
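# reverse the qubit order with swap gates, since the QFT produces its output in bit-reversed order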
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(lowercase__ , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(lowercase__ , lowercase__ )
# simulate with 10000 shots
__lowercase = Aer.get_backend("""qasm_simulator""" )
__lowercase = execute(lowercase__ , lowercase__ , shots=10_000 )
return job.result().get_counts(lowercase__ )
if __name__ == "__main__":
print(
F"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
| 716 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase )
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : str = field(default="""summarization""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
lowercase__ : ClassVar[Features] = Features({"""text""": Value("""string""" )} )
lowercase__ : ClassVar[Features] = Features({"""summary""": Value("""string""" )} )
lowercase__ : str = "text"
lowercase__ : str = "summary"
@property
def snake_case__ ( self : List[Any] ) -> Dict[str, str]:
"""simple docstring"""
return {self.text_column: "text", self.summary_column: "summary"}
| 634 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Union[str, Any] = """fnet"""
def __init__( self : Tuple , lowercase : List[Any]=32_000 , lowercase : Any=768 , lowercase : Optional[Any]=12 , lowercase : Optional[Any]=3_072 , lowercase : List[Any]="gelu_new" , lowercase : int=0.1 , lowercase : Any=512 , lowercase : str=4 , lowercase : Dict=0.02 , lowercase : str=1E-1_2 , lowercase : Any=False , lowercase : Any=512 , lowercase : Optional[Any]=3 , lowercase : Dict=1 , lowercase : List[str]=2 , **lowercase : Tuple , ) -> Tuple:
"""simple docstring"""
super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )
__lowercase = vocab_size
__lowercase = max_position_embeddings
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = initializer_range
__lowercase = type_vocab_size
__lowercase = layer_norm_eps
__lowercase = use_tpu_fourier_optimizations
__lowercase = tpu_short_seq_length
| 717 |
def UpperCAmelCase__ ( lowercase__ ) -> Optional[int]:
__lowercase = len(lowercase__ )
__lowercase = sum(lowercase__ )
__lowercase = [[False for x in range(s + 1 )] for y in range(n + 1 )]
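# dp[i][j] is True when some subset of the first i elements sums exactly to j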
for i in range(1 , n + 1 ):
__lowercase = True
for i in range(1 , s + 1 ):
__lowercase = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
__lowercase = dp[i][j - 1]
if arr[i - 1] <= j:
__lowercase = dp[i][j] or dp[i - 1][j - arr[i - 1]]
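# the smallest partition difference is s - 2 * j for the largest achievable j not exceeding s / 2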
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
__lowercase = s - 2 * j
break
return diff
| 634 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ = {
"configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Swinv2ForImageClassification",
"Swinv2ForMaskedImageModeling",
"Swinv2Model",
"Swinv2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinv2 import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
Swinv2ForImageClassification,
Swinv2ForMaskedImageModeling,
Swinv2Model,
Swinv2PreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 718 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , lowercase : str , lowercase : Union[str, Any]=13 , lowercase : Tuple=32 , lowercase : Optional[Any]=2 , lowercase : Tuple=3 , lowercase : Tuple=16 , lowercase : Tuple=[1, 2, 1] , lowercase : Optional[Any]=[2, 2, 4] , lowercase : Dict=2 , lowercase : Optional[int]=2.0 , lowercase : List[Any]=True , lowercase : str=0.0 , lowercase : Any=0.0 , lowercase : Optional[int]=0.1 , lowercase : int="gelu" , lowercase : Tuple=False , lowercase : Optional[Any]=True , lowercase : int=0.02 , lowercase : Union[str, Any]=1E-5 , lowercase : Dict=True , lowercase : Any=None , lowercase : str=True , lowercase : str=10 , lowercase : Dict=8 , lowercase : int=["stage1", "stage2", "stage3"] , lowercase : Optional[int]=[1, 2, 3] , ) -> Any:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = embed_dim
__lowercase = depths
__lowercase = num_heads
__lowercase = window_size
__lowercase = mlp_ratio
__lowercase = qkv_bias
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = drop_path_rate
__lowercase = hidden_act
__lowercase = use_absolute_embeddings
__lowercase = patch_norm
__lowercase = layer_norm_eps
__lowercase = initializer_range
__lowercase = is_training
__lowercase = scope
__lowercase = use_labels
__lowercase = type_sequence_label_size
__lowercase = encoder_stride
__lowercase = out_features
__lowercase = out_indices
def snake_case__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self : List[str] ) -> int:
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def snake_case__ ( self : Any , lowercase : List[Any] , lowercase : Optional[int] , lowercase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = MaskFormerSwinModel(config=lowercase )
model.to(lowercase )
model.eval()
__lowercase = model(lowercase )
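# each of the len(depths) - 1 patch-merging stages reduces the number of patches by 4 and doubles the embedding dimension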
__lowercase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__lowercase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def snake_case__ ( self : Any , lowercase : Tuple , lowercase : Any , lowercase : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowercase = MaskFormerSwinBackbone(config=lowercase )
model.to(lowercase )
model.eval()
__lowercase = model(lowercase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(ValueError ):
__lowercase = ["""stem"""]
__lowercase = MaskFormerSwinBackbone(config=lowercase )
def snake_case__ ( self : int ) -> Any:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : Optional[int] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowercase__ : List[str] = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
lowercase__ : List[str] = False
lowercase__ : int = False
lowercase__ : int = False
lowercase__ : Tuple = False
lowercase__ : Optional[Any] = False
def snake_case__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = MaskFormerSwinModelTester(self )
__lowercase = ConfigTester(self , config_class=lowercase , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def snake_case__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowercase )
@unittest.skip("""Swin does not use inputs_embeds""" )
def snake_case__ ( self : int ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def snake_case__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) )
def snake_case__ ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def snake_case__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple , lowercase : Tuple , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase , lowercase ) )
__lowercase = outputs.hidden_states
__lowercase = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowercase ) , lowercase )
# Swin has a different seq_length
__lowercase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def snake_case__ ( self : int ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , lowercase )
def snake_case__ ( self : int ) -> str:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = 3
__lowercase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__lowercase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowercase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__lowercase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def snake_case__ ( self : Any ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def snake_case__ ( self : List[str] ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def snake_case__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(lowercase : Optional[int] ):
lowercase[lowercase != lowercase] = 0  # NaN is the only value that is not equal to itself
return lowercase
def check_equivalence(lowercase : Optional[int] , lowercase : str , lowercase : str , lowercase : Tuple={} ):
with torch.no_grad():
__lowercase = model(**lowercase , return_dict=lowercase , **lowercase )
__lowercase = model(**lowercase , return_dict=lowercase , **lowercase ).to_tuple()
def recursive_check(lowercase : int , lowercase : Optional[Any] ):
if isinstance(lowercase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowercase , lowercase ):
recursive_check(lowercase , lowercase )
elif isinstance(lowercase , lowercase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(lowercase , lowercase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(lowercase ) , set_nan_tensor_to_zero(lowercase ) , atol=1E-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
F" {torch.isnan(lowercase ).any()} and `inf`: {torch.isinf(lowercase )}. Dict has"
F" `nan`: {torch.isnan(lowercase ).any()} and `inf`: {torch.isinf(lowercase )}."
) , )
recursive_check(lowercase , lowercase )
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
model.to(lowercase )
model.eval()
__lowercase = self._prepare_for_class(lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase , {"""output_hidden_states""": True} )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase , {"""output_hidden_states""": True} )
@require_torch
class _lowerCAmelCase ( unittest.TestCase , _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : List[str] = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowercase__ : Any = MaskFormerSwinConfig
def snake_case__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = MaskFormerSwinModelTester(self )
def snake_case__ ( self : Any ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
__lowercase = backbone_class(lowercase )
backbone.to(lowercase )
backbone.eval()
__lowercase = backbone(**lowercase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , lowercase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
__lowercase = backbone(**lowercase , output_hidden_states=lowercase )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__lowercase , __lowercase , __lowercase = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
__lowercase = backbone(**lowercase , output_attentions=lowercase )
self.assertIsNotNone(outputs.attentions )
| 634 | 0 |
from math import pi, sqrt
def UpperCAmelCase__ ( lowercase__ ) -> float:
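# gamma for positive integers and half-integers via the recurrence gamma(n) = (n - 1) * gamma(n - 1), with gamma(1) = 1 and gamma(0.5) = sqrt(pi)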
if num <= 0:
raise ValueError("""math domain error""" )
if num > 171.5:
raise OverflowError("""math range error""" )
elif num - int(lowercase__ ) not in (0, 0.5):
raise NotImplementedError("""num must be an integer or a half-integer""" )
elif num == 0.5:
return sqrt(pi )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def UpperCAmelCase__ ( ) -> None:
assert gamma(0.5 ) == sqrt(pi )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCamelCase__ = 1.0
while num:
UpperCamelCase__ = float(input("Gamma of: "))
print(F"""gamma({num}) = {gamma(num)}""")
print("\nEnter 0 to exit...")
| 719 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
UpperCamelCase__ = "scheduler_config.json"
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : List[Any] = 1
lowercase__ : Tuple = 2
lowercase__ : Union[str, Any] = 3
lowercase__ : Union[str, Any] = 4
lowercase__ : str = 5
lowercase__ : Any = 6
lowercase__ : Any = 7
lowercase__ : List[str] = 8
lowercase__ : Union[str, Any] = 9
lowercase__ : int = 10
lowercase__ : List[str] = 11
lowercase__ : List[Any] = 12
lowercase__ : str = 13
lowercase__ : Optional[int] = 14
@dataclass
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : torch.FloatTensor
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : Optional[int] = SCHEDULER_CONFIG_NAME
lowercase__ : int = []
lowercase__ : Dict = True
@classmethod
def snake_case__ ( cls : str , lowercase : Dict[str, Any] = None , lowercase : Optional[str] = None , lowercase : Any=False , **lowercase : List[str] , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase , __lowercase , __lowercase = cls.load_config(
pretrained_model_name_or_path=lowercase , subfolder=lowercase , return_unused_kwargs=lowercase , return_commit_hash=lowercase , **lowercase , )
return cls.from_config(lowercase , return_unused_kwargs=lowercase , **lowercase )
def snake_case__ ( self : Dict , lowercase : Union[str, os.PathLike] , lowercase : bool = False , **lowercase : List[str] ) -> Optional[Any]:
"""simple docstring"""
self.save_config(save_directory=lowercase , push_to_hub=lowercase , **lowercase )
@property
def snake_case__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return self._get_compatibles()
@classmethod
def snake_case__ ( cls : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = list(set([cls.__name__] + cls._compatibles ) )
__lowercase = importlib.import_module(__name__.split(""".""" )[0] )
__lowercase = [
getattr(lowercase , lowercase ) for c in compatible_classes_str if hasattr(lowercase , lowercase )
]
return compatible_classes
| 634 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase__ = logging.get_logger(__name__)
class _lowerCAmelCase ( _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Dict = """maskformer-swin"""
lowercase__ : Optional[Any] = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : Optional[int] , lowercase : str=224 , lowercase : Optional[int]=4 , lowercase : Optional[int]=3 , lowercase : Optional[int]=96 , lowercase : List[Any]=[2, 2, 6, 2] , lowercase : int=[3, 6, 12, 24] , lowercase : Union[str, Any]=7 , lowercase : Union[str, Any]=4.0 , lowercase : int=True , lowercase : str=0.0 , lowercase : Dict=0.0 , lowercase : Tuple=0.1 , lowercase : Union[str, Any]="gelu" , lowercase : int=False , lowercase : List[Any]=0.02 , lowercase : List[str]=1E-5 , lowercase : Any=None , lowercase : Any=None , **lowercase : Optional[int] , ) -> Any:
"""simple docstring"""
super().__init__(**lowercase )
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = embed_dim
__lowercase = depths
__lowercase = len(lowercase )
__lowercase = num_heads
__lowercase = window_size
__lowercase = mlp_ratio
__lowercase = qkv_bias
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = drop_path_rate
__lowercase = hidden_act
__lowercase = use_absolute_embeddings
__lowercase = layer_norm_eps
__lowercase = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__lowercase = int(embed_dim * 2 ** (len(lowercase ) - 1) )
__lowercase = ["""stem"""] + [F"stage{idx}" for idx in range(1 , len(lowercase ) + 1 )]
__lowercase , __lowercase = get_aligned_output_features_output_indices(
out_features=lowercase , out_indices=lowercase , stage_names=self.stage_names )
| 720 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Union[str, Any] = """sew"""
def __init__( self : List[Any] , lowercase : int=32 , lowercase : List[str]=768 , lowercase : Dict=12 , lowercase : str=12 , lowercase : str=3_072 , lowercase : Optional[int]=2 , lowercase : List[str]="gelu" , lowercase : List[str]=0.1 , lowercase : Tuple=0.1 , lowercase : Dict=0.1 , lowercase : Any=0.0 , lowercase : Dict=0.1 , lowercase : Optional[int]=0.1 , lowercase : List[str]=0.02 , lowercase : Dict=1E-5 , lowercase : Tuple="group" , lowercase : int="gelu" , lowercase : Any=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowercase : Optional[int]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase : Any=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase : List[str]=False , lowercase : Tuple=128 , lowercase : int=16 , lowercase : Union[str, Any]=True , lowercase : List[str]=0.05 , lowercase : Optional[int]=10 , lowercase : Any=2 , lowercase : Optional[Any]=0.0 , lowercase : Optional[Any]=10 , lowercase : int=0 , lowercase : Optional[int]="mean" , lowercase : List[Any]=False , lowercase : str=False , lowercase : int=256 , lowercase : str=0 , lowercase : List[Any]=1 , lowercase : List[Any]=2 , **lowercase : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase )
__lowercase = hidden_size
__lowercase = feat_extract_norm
__lowercase = feat_extract_activation
__lowercase = list(lowercase )
__lowercase = list(lowercase )
__lowercase = list(lowercase )
__lowercase = conv_bias
__lowercase = num_conv_pos_embeddings
__lowercase = num_conv_pos_embedding_groups
__lowercase = len(self.conv_dim )
__lowercase = num_hidden_layers
__lowercase = intermediate_size
__lowercase = squeeze_factor
__lowercase = hidden_act
__lowercase = num_attention_heads
__lowercase = hidden_dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = feat_proj_dropout
__lowercase = final_dropout
__lowercase = layerdrop
__lowercase = layer_norm_eps
__lowercase = initializer_range
__lowercase = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
F"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"
F"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowercase = apply_spec_augment
__lowercase = mask_time_prob
__lowercase = mask_time_length
__lowercase = mask_time_min_masks
__lowercase = mask_feature_prob
__lowercase = mask_feature_length
__lowercase = mask_feature_min_masks
# ctc loss
__lowercase = ctc_loss_reduction
__lowercase = ctc_zero_infinity
# sequence classification
__lowercase = use_weighted_layer_sum
__lowercase = classifier_proj_size
@property
def snake_case__ ( self : Dict ) -> str:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
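# Illustrative note (added comment, not part of the original config): the property above returns the
# product of the convolutional strides, i.e. the overall downsampling ratio of the feature extractor.
# With the default strides (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) this is
# functools.reduce(operator.mul , (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , 1 ) == 320.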
| 634 | 0 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : List[Any] , lowercase : str ) -> str:
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
__lowercase = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(lowercase )
def snake_case__ ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase = """sshleifer/tiny-gpt2"""
__lowercase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
__lowercase = PyTorchBenchmark(lowercase )
__lowercase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = """sgugger/tiny-distilbert-classification"""
__lowercase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , only_pretrain_model=lowercase , )
__lowercase = PyTorchBenchmark(lowercase )
__lowercase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case__ ( self : Dict ) -> str:
"""simple docstring"""
__lowercase = """sshleifer/tiny-gpt2"""
__lowercase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , torchscript=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
__lowercase = PyTorchBenchmark(lowercase )
__lowercase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def snake_case__ ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = """sshleifer/tiny-gpt2"""
__lowercase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , fpaa=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
__lowercase = PyTorchBenchmark(lowercase )
__lowercase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case__ ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = """sshleifer/tiny-gpt2"""
__lowercase = AutoConfig.from_pretrained(lowercase )
# set architectures equal to `None`
__lowercase = None
__lowercase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
__lowercase = PyTorchBenchmark(lowercase , configs=[config] )
__lowercase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = """sshleifer/tiny-gpt2"""
__lowercase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
__lowercase = PyTorchBenchmark(lowercase )
__lowercase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
def snake_case__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = """sshleifer/tiny-gpt2"""
__lowercase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=lowercase , multi_process=lowercase , )
__lowercase = PyTorchBenchmark(lowercase )
__lowercase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def snake_case__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = """sshleifer/tiny-gpt2"""
__lowercase = AutoConfig.from_pretrained(lowercase )
__lowercase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
__lowercase = PyTorchBenchmark(lowercase , configs=[config] )
__lowercase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case__ ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = """sshleifer/tinier_bart"""
__lowercase = AutoConfig.from_pretrained(lowercase )
__lowercase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
__lowercase = PyTorchBenchmark(lowercase , configs=[config] )
__lowercase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case__ ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = """sshleifer/tiny-gpt2"""
__lowercase = AutoConfig.from_pretrained(lowercase )
__lowercase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
__lowercase = PyTorchBenchmark(lowercase , configs=[config] )
__lowercase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def snake_case__ ( self : int ) -> str:
"""simple docstring"""
__lowercase = """sshleifer/tinier_bart"""
__lowercase = AutoConfig.from_pretrained(lowercase )
__lowercase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
__lowercase = PyTorchBenchmark(lowercase , configs=[config] )
__lowercase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def snake_case__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , save_to_csv=lowercase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(lowercase , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(lowercase , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(lowercase , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(lowercase , """train_time.csv""" ) , env_info_csv_file=os.path.join(lowercase , """env.csv""" ) , multi_process=lowercase , )
__lowercase = PyTorchBenchmark(lowercase )
benchmark.run()
self.assertTrue(Path(os.path.join(lowercase , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase , """train_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase , """train_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase , """env.csv""" ) ).exists() )
def snake_case__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(lowercase : Optional[Any] ):
self.assertTrue(hasattr(lowercase , """sequential""" ) )
self.assertTrue(hasattr(lowercase , """cumulative""" ) )
self.assertTrue(hasattr(lowercase , """current""" ) )
self.assertTrue(hasattr(lowercase , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(lowercase , """log.txt""" ) , log_print=lowercase , trace_memory_line_by_line=lowercase , multi_process=lowercase , )
__lowercase = PyTorchBenchmark(lowercase )
__lowercase = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(lowercase , """log.txt""" ) ).exists() )
| 721 |
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class _lowerCAmelCase ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : int = WavaVecaPhonemeCTCTokenizer
lowercase__ : Optional[int] = False
def snake_case__ ( self : str ) -> int:
"""simple docstring"""
super().setUp()
__lowercase = (
"""<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː """
"""ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː """
"""ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 """
"""oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ """
"""pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ """
"""yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ """
"""əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ """
"""ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ """
"""ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ """
"""uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ """
"""ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ """
"""ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ """
"""ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"""
).split(""" """ )
__lowercase = dict(zip(lowercase , range(len(lowercase ) ) ) )
__lowercase = {"""pad_token""": """<pad>""", """unk_token""": """<unk>""", """bos_token""": """<s>""", """eos_token""": """</s>"""}
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowercase ) + """\n""" )
def snake_case__ ( self : List[Any] , lowercase : Optional[Any] , lowercase : List[str]=False , lowercase : List[str]=20 , lowercase : str=5 ) -> Tuple[str, list]:
"""simple docstring"""
__lowercase = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase )) for i in range(len(lowercase ) )]
__lowercase = list(filter(lambda lowercase : [t[0]] == tokenizer.encode(t[1] , do_phonemize=lowercase ) , lowercase ) )
if max_length is not None and len(lowercase ) > max_length:
__lowercase = toks[:max_length]
if min_length is not None and len(lowercase ) < min_length and len(lowercase ) > 0:
while len(lowercase ) < min_length:
__lowercase = toks + toks
# toks_str = [t[1] for t in toks]
__lowercase = [t[0] for t in toks]
# Ensure consistency
__lowercase = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase )
if " " not in output_txt and len(lowercase ) > 1:
__lowercase = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase )
)
if with_prefix_space:
__lowercase = """ """ + output_txt
__lowercase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
return output_txt, output_ids
def snake_case__ ( self : Tuple , **lowercase : int ) -> Union[str, Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **lowercase )
def snake_case__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
# check adding a single token
tokenizer.add_tokens("""xxx""" )
__lowercase = tokenizer("""m xxx ɪ""" , do_phonemize=lowercase ).input_ids
self.assertEqual(lowercase , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(["""aaa""", """bbb""", """ccc"""] )
__lowercase = tokenizer("""m aaa ɪ ccc""" , do_phonemize=lowercase ).input_ids
self.assertEqual(lowercase , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
__lowercase = tokenizer("""maɪ c""" , do_phonemize=lowercase ).input_ids
self.assertEqual(lowercase , [3, 200] ) # mai should be <unk> (=3)
def snake_case__ ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
self.assertEqual(lowercase , """h ə l oʊ h aʊ ɑːɹ j uː""" )
def snake_case__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(lowercase ).input_ids , tokenizer(lowercase , do_phonemize=lowercase ).input_ids )
def snake_case__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
__lowercase = tokenizer.decode(tokenizer(lowercase ).input_ids )
self.assertEqual(lowercase , lowercase )
def snake_case__ ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
__lowercase = tokenizer.decode(sample_ids[0] )
__lowercase = tokenizer.batch_decode(lowercase )
self.assertEqual(lowercase , batch_tokens[0] )
self.assertEqual(lowercase , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
def snake_case__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
self.assertEqual(lowercase , """h ə l oʊ | h aʊ | ɑːɹ | j uː |""" )
def snake_case__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(lowercase ).input_ids , tokenizer(lowercase , do_phonemize=lowercase ).input_ids )
def snake_case__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
__lowercase = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
__lowercase = tokenizer.decode(sample_ids[0] )
__lowercase = tokenizer.batch_decode(lowercase )
self.assertEqual(lowercase , batch_tokens[0] )
self.assertEqual(lowercase , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
# decode with no word_del_token filter
__lowercase = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=lowercase )
__lowercase = tokenizer.batch_decode(lowercase , filter_word_delimiter_token=lowercase )
self.assertEqual(lowercase , batch_tokens[0] )
self.assertEqual(lowercase , ["""k s ɾ | ɾ l | ɭʲ""", """| j ð | s j ð s oːɹ"""] )
def snake_case__ ( self : int ) -> str:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
__lowercase = tokenizer.decode(tokenizer(lowercase ).input_ids , filter_word_delimiter_token=lowercase )
self.assertEqual(lowercase , lowercase )
def snake_case__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
__lowercase = tokenizer.decode(tokenizer(lowercase ).input_ids , filter_word_delimiter_token=lowercase )
self.assertEqual(""" """.join([p.strip() for p in phonemes.split(""" |""" )] ).strip() , lowercase )
def snake_case__ ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token=lowercase )
__lowercase = """Hello how are you"""
__lowercase = tokenizer(lowercase , phonemizer_lang="""en-us""" ).input_ids
__lowercase = tokenizer(lowercase , phonemizer_lang="""fr-fr""" ).input_ids
self.assertNotEqual(lowercase , lowercase )
__lowercase = tokenizer.decode(lowercase )
__lowercase = tokenizer.decode(lowercase )
self.assertEqual(lowercase , """h ə l oʊ h aʊ ɑːɹ j uː""" )
self.assertEqual(lowercase , """ɛ l o h aʊ a ʁ j u""" )
def snake_case__ ( self : int ) -> int:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = """Hello how Are you"""
__lowercase = """hello how are you"""
__lowercase = tokenizer(lowercase ).input_ids
__lowercase = tokenizer(lowercase ).input_ids
self.assertEqual(lowercase , lowercase )
def snake_case__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
tokenizer.add_tokens(["""!""", """?"""] )
tokenizer.add_special_tokens({"""cls_token""": """$$$"""} )
# fmt: off
__lowercase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
__lowercase = tokenizer.batch_decode(lowercase )
self.assertEqual(lowercase , ["""k s ɾ ɾ l ɭʲ!?!? $$$""", """j ð s j ð s oːɹ $$$"""] )
@staticmethod
def snake_case__ ( lowercase : List[str] , lowercase : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase = [d[key] for d in offsets]
return retrieved_list
def snake_case__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.get_tokenizer(word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ
__lowercase = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
__lowercase = tokenizer.decode(lowercase , output_char_offsets=lowercase , filter_word_delimiter_token=lowercase )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""char_offsets""" in outputs )
self.assertTrue(isinstance(lowercase , lowercase ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) , ["""k""", """s""", """ɾ""", """ɾ""", """|""", """ɾ""", """l""", """|""", """ɭʲ"""] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """start_offset""" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """end_offset""" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def snake_case__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase = self.get_tokenizer(word_delimiter_token="""|""" )
def check_list_tuples_equal(lowercase : List[str] , lowercase : Dict ):
self.assertTrue(isinstance(lowercase , lowercase ) )
self.assertTrue(isinstance(outputs_list[0] , lowercase ) )
# transform list to ModelOutput
__lowercase = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch["""text"""] , outputs_batch_a["""text"""] )
def recursive_check(lowercase : List[Any] , lowercase : Optional[int] ):
if isinstance(lowercase , lowercase ):
[recursive_check(lowercase , lowercase ) for la, la in zip(lowercase , lowercase )]
self.assertEqual(lowercase , lowercase )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["""char_offsets"""] , outputs_batch_a["""char_offsets"""] )
# fmt: off
__lowercase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we check now is
# that the output type is correct and that the output is identical to `decode`
# char
__lowercase = tokenizer.batch_decode(lowercase , output_char_offsets=lowercase )
__lowercase = [tokenizer.decode(lowercase , output_char_offsets=lowercase ) for ids in sample_ids]
check_list_tuples_equal(lowercase , lowercase )
@unittest.skip("""Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes""" )
def snake_case__ ( self : List[Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Wav2Vec2PhonemeTokenizer always puts spaces between phonemes""" )
def snake_case__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip("""encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency""" )
def snake_case__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Wav2Vec2PhonemeModel has no max model length => no testing""" )
def snake_case__ ( self : str ) -> int:
"""simple docstring"""
pass
def snake_case__ ( self : Any ) -> int:
"""simple docstring"""
__lowercase = self.get_tokenizers(do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
__lowercase = tokenizer.vocab_size
__lowercase = len(lowercase )
self.assertNotEqual(lowercase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__lowercase = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
__lowercase = tokenizer.add_tokens(lowercase )
__lowercase = tokenizer.vocab_size
__lowercase = len(lowercase )
self.assertNotEqual(lowercase , 0 )
self.assertEqual(lowercase , lowercase )
self.assertEqual(lowercase , len(lowercase ) )
self.assertEqual(lowercase , all_size + len(lowercase ) )
__lowercase = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=lowercase )
self.assertGreaterEqual(len(lowercase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
__lowercase = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
__lowercase = tokenizer.add_special_tokens(lowercase )
__lowercase = tokenizer.vocab_size
__lowercase = len(lowercase )
self.assertNotEqual(lowercase , 0 )
self.assertEqual(lowercase , lowercase )
self.assertEqual(lowercase , len(lowercase ) )
self.assertEqual(lowercase , all_size_a + len(lowercase ) )
__lowercase = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=lowercase )
self.assertGreaterEqual(len(lowercase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def snake_case__ ( self : str ) -> int:
"""simple docstring"""
pass
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def snake_case__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
def snake_case__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.get_tokenizers(fast=lowercase , do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
__lowercase = ["""ð""", """ɪ""", """s""", """ɪ""", """z""", """ɐ""", """t""", """ɛ""", """k""", """s""", """t"""]
__lowercase = tokenizer.convert_tokens_to_string(lowercase )
self.assertIsInstance(output["""text"""] , lowercase )
| 634 | 0 |
from __future__ import annotations
def UpperCAmelCase__ ( lowercase__ ) -> bool:
if len(lowercase__ ) < 2:
raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" )
if any(i <= 0 for i in nums ):
raise ValueError("""All values must be greater than 0""" )
__lowercase = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
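# Illustrative usage sketch (added comment, calls assumed): a list of side lengths can only form a
# closed polygon if the longest side is strictly shorter than the sum of the remaining sides,
# e.g. [3, 4, 5] -> True, [1, 1, 3] -> False, and [2] raises a ValueError.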
| 700 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
# TODO Update this
UpperCamelCase__ = {
"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Union[str, Any] = """esm"""
def __init__( self : Any , lowercase : Optional[Any]=None , lowercase : Optional[int]=None , lowercase : List[Any]=None , lowercase : Optional[int]=768 , lowercase : str=12 , lowercase : Union[str, Any]=12 , lowercase : Dict=3_072 , lowercase : Optional[int]=0.1 , lowercase : str=0.1 , lowercase : Dict=1_026 , lowercase : Tuple=0.02 , lowercase : str=1E-1_2 , lowercase : Dict="absolute" , lowercase : Optional[Any]=True , lowercase : int=None , lowercase : int=False , lowercase : List[str]=False , lowercase : Tuple=None , lowercase : Tuple=None , **lowercase : Union[str, Any] , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=lowercase , mask_token_id=lowercase , **lowercase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
__lowercase = use_cache
__lowercase = emb_layer_norm_before
__lowercase = token_dropout
__lowercase = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
__lowercase = EsmFoldConfig()
elif isinstance(lowercase , lowercase ):
__lowercase = EsmFoldConfig(**lowercase )
__lowercase = esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
__lowercase = get_default_vocab_list()
else:
__lowercase = vocab_list
else:
__lowercase = None
__lowercase = None
if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , lowercase ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def snake_case__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = super().to_dict()
if isinstance(self.esmfold_config , lowercase ):
__lowercase = self.esmfold_config.to_dict()
return output
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : str = None
lowercase__ : bool = True
lowercase__ : bool = False
lowercase__ : bool = False
lowercase__ : bool = False
lowercase__ : float = 0
lowercase__ : bool = True
lowercase__ : bool = False
lowercase__ : int = 128
lowercase__ : "TrunkConfig" = None
def snake_case__ ( self : List[str] ) -> Any:
"""simple docstring"""
if self.trunk is None:
__lowercase = TrunkConfig()
elif isinstance(self.trunk , lowercase ):
__lowercase = TrunkConfig(**self.trunk )
def snake_case__ ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase = asdict(self )
__lowercase = self.trunk.to_dict()
return output
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : int = 48
lowercase__ : int = 1_024
lowercase__ : int = 128
lowercase__ : int = 32
lowercase__ : int = 32
lowercase__ : int = 32
lowercase__ : float = 0
lowercase__ : float = 0
lowercase__ : bool = False
lowercase__ : int = 4
lowercase__ : Optional[int] = 128
lowercase__ : "StructureModuleConfig" = None
def snake_case__ ( self : Tuple ) -> str:
"""simple docstring"""
if self.structure_module is None:
__lowercase = StructureModuleConfig()
elif isinstance(self.structure_module , lowercase ):
__lowercase = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"`max_recycles` should be positive, got {self.max_recycles}." )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
"""`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
F" {self.sequence_state_dim} and {self.sequence_head_width}." )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
"""`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
F" {self.pairwise_state_dim} and {self.pairwise_head_width}." )
__lowercase = self.sequence_state_dim // self.sequence_head_width
__lowercase = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
F" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
F" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
if self.dropout >= 0.4:
raise ValueError(F"`dropout` should not be greater than 0.4, got {self.dropout}." )
def snake_case__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = asdict(self )
__lowercase = self.structure_module.to_dict()
return output
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : int = 384
lowercase__ : int = 128
lowercase__ : int = 16
lowercase__ : int = 128
lowercase__ : int = 12
lowercase__ : int = 4
lowercase__ : int = 8
lowercase__ : float = 0.1
lowercase__ : int = 8
lowercase__ : int = 1
lowercase__ : int = 2
lowercase__ : int = 7
lowercase__ : int = 10
lowercase__ : float = 1E-8
lowercase__ : float = 1E5
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
return asdict(self )
def UpperCAmelCase__ ( ) -> List[Any]:
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 634 | 0 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
def snake_case__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = FlaxControlNetModel.from_pretrained(
"""lllyasviel/sd-controlnet-canny""" , from_pt=lowercase , dtype=jnp.bfloataa )
__lowercase , __lowercase = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , controlnet=lowercase , from_pt=lowercase , dtype=jnp.bfloataa )
__lowercase = controlnet_params
__lowercase = """bird"""
__lowercase = jax.device_count()
__lowercase = pipe.prepare_text_inputs([prompts] * num_samples )
__lowercase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" )
__lowercase = pipe.prepare_image_inputs([canny_image] * num_samples )
__lowercase = jax.random.PRNGKey(0 )
__lowercase = jax.random.split(lowercase , jax.device_count() )
__lowercase = replicate(lowercase )
__lowercase = shard(lowercase )
__lowercase = shard(lowercase )
__lowercase = pipe(
prompt_ids=lowercase , image=lowercase , params=lowercase , prng_seed=lowercase , num_inference_steps=50 , jit=lowercase , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
__lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__lowercase = images[0, 253:256, 253:256, -1]
__lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowercase = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] )
print(F"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def snake_case__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = FlaxControlNetModel.from_pretrained(
"""lllyasviel/sd-controlnet-openpose""" , from_pt=lowercase , dtype=jnp.bfloataa )
__lowercase , __lowercase = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , controlnet=lowercase , from_pt=lowercase , dtype=jnp.bfloataa )
__lowercase = controlnet_params
__lowercase = """Chef in the kitchen"""
__lowercase = jax.device_count()
__lowercase = pipe.prepare_text_inputs([prompts] * num_samples )
__lowercase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" )
__lowercase = pipe.prepare_image_inputs([pose_image] * num_samples )
__lowercase = jax.random.PRNGKey(0 )
__lowercase = jax.random.split(lowercase , jax.device_count() )
__lowercase = replicate(lowercase )
__lowercase = shard(lowercase )
__lowercase = shard(lowercase )
__lowercase = pipe(
prompt_ids=lowercase , image=lowercase , params=lowercase , prng_seed=lowercase , num_inference_steps=50 , jit=lowercase , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
__lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__lowercase = images[0, 253:256, 253:256, -1]
__lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowercase = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
print(F"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 701 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : Tuple = LxmertTokenizer
lowercase__ : List[str] = LxmertTokenizerFast
lowercase__ : Optional[Any] = True
lowercase__ : List[Any] = True
def snake_case__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
super().setUp()
__lowercase = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def snake_case__ ( self : Optional[int] , lowercase : int ) -> List[Any]:
"""simple docstring"""
__lowercase = """UNwant\u00E9d,running"""
__lowercase = """unwanted, running"""
return input_text, output_text
def snake_case__ ( self : str ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class(self.vocab_file )
__lowercase = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(lowercase , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , [7, 4, 5, 10, 8, 9] )
def snake_case__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__lowercase = self.get_tokenizer()
__lowercase = self.get_rust_tokenizer()
__lowercase = """I was born in 92000, and this is falsé."""
__lowercase = tokenizer.tokenize(lowercase )
__lowercase = rust_tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
__lowercase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
__lowercase = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
__lowercase = self.get_rust_tokenizer()
__lowercase = tokenizer.encode(lowercase )
__lowercase = rust_tokenizer.encode(lowercase )
self.assertListEqual(lowercase , lowercase )
| 634 | 0 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
UpperCamelCase__ = 16
UpperCamelCase__ = 32
def UpperCAmelCase__ ( lowercase__ ):
return int(x / 2**20 )
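# Illustrative note (added comment, intent assumed): the helper above converts a byte count to whole
# mebibytes, e.g. 5 * 2**20 bytes -> 5; it is used below to report CUDA memory deltas in MB.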
class _lowerCAmelCase :
"""simple docstring"""
def __enter__( self : Any ) -> Dict:
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
__lowercase = torch.cuda.memory_allocated()
return self
def __exit__( self : Optional[int] , *lowercase : str ) -> str:
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
__lowercase = torch.cuda.memory_allocated()
__lowercase = torch.cuda.max_memory_allocated()
__lowercase = bamb(self.end - self.begin )
__lowercase = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def UpperCAmelCase__ ( lowercase__ , lowercase__ = 16 , lowercase__ = "bert-base-cased" , lowercase__ = 320 , lowercase__ = 160 , ):
__lowercase = AutoTokenizer.from_pretrained(lowercase__ )
__lowercase = load_dataset(
"""glue""" , """mrpc""" , split={"""train""": F"train[:{n_train}]", """validation""": F"validation[:{n_val}]"} )
def tokenize_function(lowercase__ ):
# max_length=None => use the model max length (it's actually the default)
__lowercase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__lowercase = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowercase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowercase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(lowercase__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
__lowercase = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
__lowercase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
def UpperCAmelCase__ ( lowercase__ , lowercase__ ):
# Initialize accelerator
__lowercase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowercase = config["""lr"""]
__lowercase = int(config["""num_epochs"""] )
__lowercase = int(config["""seed"""] )
__lowercase = int(config["""batch_size"""] )
__lowercase = args.model_name_or_path
set_seed(lowercase__ )
__lowercase , __lowercase = get_dataloaders(lowercase__ , lowercase__ , lowercase__ , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowercase = AutoModelForSequenceClassification.from_pretrained(lowercase__ , return_dict=lowercase__ )
# Instantiate optimizer
__lowercase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__lowercase = optimizer_cls(params=model.parameters() , lr=lowercase__ )
if accelerator.state.deepspeed_plugin is not None:
__lowercase = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
__lowercase = 1
__lowercase = (len(lowercase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__lowercase = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=0 , num_training_steps=lowercase__ , )
else:
__lowercase = DummyScheduler(lowercase__ , total_num_steps=lowercase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# We need to keep track of how many total steps we have iterated over
__lowercase = 0
# We also need to keep track of the stating epoch so files are named properly
__lowercase = 0
# Now we train the model
__lowercase = {}
for epoch in range(lowercase__ , lowercase__ ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(lowercase__ ):
__lowercase = model(**lowercase__ )
__lowercase = outputs.loss
__lowercase = loss / gradient_accumulation_steps
accelerator.backward(lowercase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("""Memory before entering the train : {}""".format(bamb(tracemalloc.begin ) ) )
accelerator.print("""Memory consumed at the end of the train (end-begin): {}""".format(tracemalloc.used ) )
accelerator.print("""Peak Memory consumed during the train (max-begin): {}""".format(tracemalloc.peaked ) )
accelerator.print(
"""Total Peak Memory consumed during the train (max): {}""".format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
__lowercase = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F"epoch-{epoch}"] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , """peak_memory_utilization.json""" ) , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
def UpperCAmelCase__ ( ):
__lowercase = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=lowercase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowercase__ , )
parser.add_argument(
"""--output_dir""" , type=lowercase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--peak_memory_upper_bound""" , type=lowercase__ , default=lowercase__ , help="""The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.""" , )
parser.add_argument(
"""--n_train""" , type=lowercase__ , default=320 , help="""Number of training examples to use.""" , )
parser.add_argument(
"""--n_val""" , type=lowercase__ , default=160 , help="""Number of validation examples to use.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowercase__ , default=1 , help="""Number of train epochs.""" , )
__lowercase = parser.parse_args()
__lowercase = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
| 702 |
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> bool:
__lowercase = len(lowercase__ )
__lowercase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# a sum of zero can always be formed by taking no elements, hence True
for i in range(arr_len + 1 ):
__lowercase = True
# a non-zero sum cannot be formed from an empty set, hence False
for i in range(1 , required_sum + 1 ):
__lowercase = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
__lowercase = subset[i - 1][j]
if arr[i - 1] <= j:
__lowercase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
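# Illustrative usage sketch (added comment, calls assumed): the DP above answers whether some subset
# of the first argument sums exactly to the second,
# e.g. ([3, 34, 4, 12, 5, 2], 9) -> True (4 + 5), while ([3, 34, 4, 12, 5, 2], 30) -> False.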
| 634 | 0 |
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def UpperCAmelCase__ ( lowercase__ ) -> List[str]:
__lowercase = model.config
__lowercase = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
__lowercase = MBartConfig(
is_decoder=lowercase__ , is_encoder_decoder=lowercase__ , add_cross_attention=lowercase__ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=lowercase__ , add_final_layer_norm=lowercase__ , )
return encoder_config, decoder_config
def UpperCAmelCase__ ( lowercase__ ) -> int:
if "encoder.model" in name:
__lowercase = name.replace("""encoder.model""" , """encoder""" )
if "decoder.model" in name:
__lowercase = name.replace("""decoder.model""" , """decoder""" )
if "patch_embed.proj" in name:
__lowercase = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
__lowercase = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if name.startswith("""encoder""" ):
if "layers" in name:
__lowercase = """encoder.""" + name
if "attn.proj" in name:
__lowercase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "mask" not in name:
__lowercase = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
__lowercase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__lowercase = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__lowercase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__lowercase = name.replace("""mlp.fc2""" , """output.dense""" )
if name == "encoder.norm.weight":
__lowercase = """encoder.layernorm.weight"""
if name == "encoder.norm.bias":
__lowercase = """encoder.layernorm.bias"""
return name
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> Any:
for key in orig_state_dict.copy().keys():
__lowercase = orig_state_dict.pop(lowercase__ )
if "qkv" in key:
__lowercase = key.split(""".""" )
__lowercase = int(key_split[3] )
__lowercase = int(key_split[5] )
__lowercase = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__lowercase = val[:dim, :]
__lowercase = val[dim : dim * 2, :]
__lowercase = val[-dim:, :]
else:
__lowercase = val[:dim]
__lowercase = val[dim : dim * 2]
__lowercase = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
__lowercase = val
return orig_state_dict
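# Illustrative note (added comment, layout assumed): the qkv split above relies on the fused
# projection being stacked as [query; key; value] along dim 0, so a (3 * dim, dim) weight yields
# three (dim, dim) slices and a (3 * dim,) bias yields three (dim,) slices.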
def UpperCAmelCase__ ( lowercase__ , lowercase__=None , lowercase__=False ) -> List[Any]:
# load original model
__lowercase = DonutModel.from_pretrained(lowercase__ ).eval()
# load HuggingFace model
__lowercase , __lowercase = get_configs(lowercase__ )
__lowercase = DonutSwinModel(lowercase__ )
__lowercase = MBartForCausalLM(lowercase__ )
__lowercase = VisionEncoderDecoderModel(encoder=lowercase__ , decoder=lowercase__ )
model.eval()
__lowercase = original_model.state_dict()
__lowercase = convert_state_dict(lowercase__ , lowercase__ )
model.load_state_dict(lowercase__ )
# verify results on scanned document
__lowercase = load_dataset("""hf-internal-testing/example-documents""" )
__lowercase = dataset["""test"""][0]["""image"""].convert("""RGB""" )
__lowercase = XLMRobertaTokenizerFast.from_pretrained(lowercase__ , from_slow=lowercase__ )
__lowercase = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
__lowercase = DonutProcessor(lowercase__ , lowercase__ )
__lowercase = processor(lowercase__ , return_tensors="""pt""" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
__lowercase = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
__lowercase = """When is the coffee break?"""
__lowercase = task_prompt.replace("""{user_input}""" , lowercase__ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
__lowercase = """<s_rvlcdip>"""
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
__lowercase = """<s_cord>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
__lowercase = """s_cord-v2>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
__lowercase = """<s_zhtrainticket>"""
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
__lowercase = """hello world"""
else:
raise ValueError("""Model name not supported""" )
__lowercase = original_model.decoder.tokenizer(lowercase__ , add_special_tokens=lowercase__ , return_tensors="""pt""" )[
"""input_ids"""
]
__lowercase = original_model.encoder.model.patch_embed(lowercase__ )
__lowercase , __lowercase = model.encoder.embeddings(lowercase__ )
assert torch.allclose(lowercase__ , lowercase__ , atol=1E-3 )
# verify encoder hidden states
__lowercase = original_model.encoder(lowercase__ )
__lowercase = model.encoder(lowercase__ ).last_hidden_state
assert torch.allclose(lowercase__ , lowercase__ , atol=1E-2 )
# verify decoder hidden states
__lowercase = original_model(lowercase__ , lowercase__ , lowercase__ ).logits
__lowercase = model(lowercase__ , decoder_input_ids=lowercase__ ).logits
assert torch.allclose(lowercase__ , lowercase__ , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F"Saving model and processor to {pytorch_dump_folder_path}" )
model.save_pretrained(lowercase__ )
processor.save_pretrained(lowercase__ )
if push_to_hub:
model.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
processor.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
UpperCamelCase__ = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 703 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Union[str, Any] = """yolos"""
def __init__( self : Optional[int] , lowercase : Any=768 , lowercase : Tuple=12 , lowercase : Tuple=12 , lowercase : str=3_072 , lowercase : Optional[Any]="gelu" , lowercase : Union[str, Any]=0.0 , lowercase : Dict=0.0 , lowercase : Optional[int]=0.02 , lowercase : Optional[Any]=1E-1_2 , lowercase : Tuple=[512, 864] , lowercase : Optional[int]=16 , lowercase : Dict=3 , lowercase : Optional[Any]=True , lowercase : Optional[int]=100 , lowercase : Optional[int]=True , lowercase : Any=False , lowercase : Any=1 , lowercase : Any=5 , lowercase : List[str]=2 , lowercase : Union[str, Any]=5 , lowercase : str=2 , lowercase : Tuple=0.1 , **lowercase : str , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowercase )
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = qkv_bias
__lowercase = num_detection_tokens
__lowercase = use_mid_position_embeddings
__lowercase = auxiliary_loss
# Hungarian matcher
__lowercase = class_cost
__lowercase = bbox_cost
__lowercase = giou_cost
# Loss coefficients
__lowercase = bbox_loss_coefficient
__lowercase = giou_loss_coefficient
__lowercase = eos_coefficient
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Dict = version.parse("""1.11""" )
@property
def snake_case__ ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def snake_case__ ( self : int ) -> float:
"""simple docstring"""
return 1E-4
@property
def snake_case__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
return 12
| 634 | 0 |
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCAmelCase__ ( lowercase__ ) -> Optional[int]:
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(lowercase__ ):
return ext
raise Exception(
F"Unable to determine file format from file extension {path}. "
F"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}" )
def UpperCAmelCase__ ( lowercase__ ) -> Optional[int]:
__lowercase = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
__lowercase = try_infer_format_from_ext(args.input ) if args.format == """infer""" else args.format
__lowercase = PipelineDataFormat.from_str(
format=lowercase__ , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(lowercase__ , lowercase__ )
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Dict , lowercase : Pipeline , lowercase : PipelineDataFormat ) -> Optional[Any]:
"""simple docstring"""
__lowercase = nlp
__lowercase = reader
@staticmethod
def snake_case__ ( lowercase : ArgumentParser ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = parser.add_parser("""run""" , help="""Run a pipeline through the CLI""" )
run_parser.add_argument("""--task""" , choices=get_supported_tasks() , help="""Task to run""" )
run_parser.add_argument("""--input""" , type=lowercase , help="""Path to the file to use for inference""" )
run_parser.add_argument("""--output""" , type=lowercase , help="""Path to the file that will be used post to write results.""" )
run_parser.add_argument("""--model""" , type=lowercase , help="""Name or path to the model to instantiate.""" )
run_parser.add_argument("""--config""" , type=lowercase , help="""Name or path to the model's config to instantiate.""" )
run_parser.add_argument(
"""--tokenizer""" , type=lowercase , help="""Name of the tokenizer to use. (default: same as the model name)""" )
run_parser.add_argument(
"""--column""" , type=lowercase , help="""Name of the column to use as input. (For multi columns input as QA use column1,columns2)""" , )
run_parser.add_argument(
"""--format""" , type=lowercase , default="""infer""" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="""Input format to read from""" , )
run_parser.add_argument(
"""--device""" , type=lowercase , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
run_parser.add_argument("""--overwrite""" , action="""store_true""" , help="""Allow overwriting the output file.""" )
run_parser.set_defaults(func=lowercase )
def snake_case__ ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self._nlp, []
for entry in self._reader:
__lowercase = nlp(**lowercase ) if self._reader.is_multi_columns else nlp(lowercase )
if isinstance(lowercase , lowercase ):
outputs.append(lowercase )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
__lowercase = self._reader.save_binary(lowercase )
logger.warning(F"Current pipeline requires output to be in binary format, saving at {binary_path}" )
else:
self._reader.save(lowercase )
| 704 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowerCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : int = IFImgaImgSuperResolutionPipeline
lowercase__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""}
lowercase__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""} )
lowercase__ : Tuple = PipelineTesterMixin.required_optional_params - {"""latents"""}
def snake_case__ ( self : Tuple ) -> Any:
"""simple docstring"""
return self._get_superresolution_dummy_components()
def snake_case__ ( self : List[str] , lowercase : Optional[int] , lowercase : Optional[Any]=0 ) -> Union[str, Any]:
"""simple docstring"""
if str(lowercase ).startswith("""mps""" ):
__lowercase = torch.manual_seed(lowercase )
else:
__lowercase = torch.Generator(device=lowercase ).manual_seed(lowercase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase ) ).to(lowercase )
__lowercase = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowercase ) ).to(lowercase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def snake_case__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def snake_case__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def snake_case__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def snake_case__ ( self : Dict ) -> int:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def snake_case__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
self._test_save_load_local()
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 634 | 0 |
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> float:
_validate_point(lowercase__ )
_validate_point(lowercase__ )
if len(lowercase__ ) != len(lowercase__ ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(a - b ) for a, b in zip(lowercase__ , lowercase__ ) ) )
def UpperCAmelCase__ ( lowercase__ ) -> None:
if point:
if isinstance(lowercase__ , lowercase__ ):
for item in point:
if not isinstance(lowercase__ , (int, float) ):
__lowercase = (
"""Expected a list of numbers as input, found """
F"{type(lowercase__ ).__name__}"
)
raise TypeError(lowercase__ )
else:
__lowercase = F"Expected a list of numbers as input, found {type(lowercase__ ).__name__}"
raise TypeError(lowercase__ )
else:
raise ValueError("""Missing an input""" )
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> float:
_validate_point(lowercase__ )
_validate_point(lowercase__ )
if len(lowercase__ ) != len(lowercase__ ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(x - y ) for x, y in zip(lowercase__ , lowercase__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 705 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 634 | 0 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
UpperCamelCase__ = logging.getLogger()
def UpperCAmelCase__ ( ) -> List[Any]:
__lowercase = argparse.ArgumentParser()
parser.add_argument("""-f""" )
__lowercase = parser.parse_args()
return args.f
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
def snake_case__ ( self : Dict ) -> None:
"""simple docstring"""
__lowercase = logging.StreamHandler(sys.stdout )
logger.addHandler(lowercase )
def snake_case__ ( self : Dict , lowercase : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , """run_glue_deebert.py""" )
with patch.object(lowercase , """argv""" , lowercase ):
__lowercase = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(lowercase , 0.666 )
@slow
@require_torch_non_multi_gpu
def snake_case__ ( self : str ) -> Tuple:
"""simple docstring"""
__lowercase = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
self.run_and_check(lowercase )
__lowercase = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(lowercase )
__lowercase = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(lowercase )
| 706 |
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> int:
if len(lowercase__ ) != len(lowercase__ ):
raise ValueError("""String lengths must match!""" )
__lowercase = 0
for chara, charb in zip(lowercase__ , lowercase__ ):
if chara != charb:
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 634 | 0 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCAmelCase ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : Dict = BioGptTokenizer
lowercase__ : Union[str, Any] = False
def snake_case__ ( self : str ) -> str:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowercase = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
__lowercase = dict(zip(lowercase , range(len(lowercase ) ) ) )
__lowercase = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(lowercase ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(lowercase ) )
def snake_case__ ( self : Tuple , lowercase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = """lower newer"""
__lowercase = """lower newer"""
return input_text, output_text
def snake_case__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = BioGptTokenizer(self.vocab_file , self.merges_file )
__lowercase = """lower"""
__lowercase = ["""low""", """er</w>"""]
__lowercase = tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
__lowercase = tokens + ["""<unk>"""]
__lowercase = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , lowercase )
@slow
def snake_case__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
__lowercase = tokenizer.encode("""sequence builders""" , add_special_tokens=lowercase )
__lowercase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowercase )
__lowercase = tokenizer.build_inputs_with_special_tokens(lowercase )
__lowercase = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 707 |
from __future__ import annotations
from collections.abc import Callable
UpperCamelCase__ = list[list[float | int]]
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> Matrix:
__lowercase = len(lowercase__ )
__lowercase = [[0 for _ in range(size + 1 )] for _ in range(lowercase__ )]
__lowercase = 42
__lowercase = 42
__lowercase = 42
__lowercase = 42
__lowercase = 42
__lowercase = 42
for row in range(lowercase__ ):
for col in range(lowercase__ ):
__lowercase = matrix[row][col]
__lowercase = vector[row][0]
__lowercase = 0
__lowercase = 0
while row < size and col < size:
# pivoting
__lowercase = max((abs(augmented[rowa][col] ), rowa) for rowa in range(lowercase__ , lowercase__ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
augmented[pivot_row] , augmented[row] = augmented[row], augmented[pivot_row]
for rowa in range(row + 1 , lowercase__ ):
__lowercase = augmented[rowa][col] / augmented[row][col]
__lowercase = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , lowercase__ ):
for row in range(lowercase__ ):
__lowercase = augmented[row][col] / augmented[col][col]
for cola in range(lowercase__ , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(lowercase__ )
]
def UpperCAmelCase__ ( lowercase__ ) -> Callable[[int], int]:
__lowercase = len(lowercase__ )
__lowercase = [[0 for _ in range(lowercase__ )] for _ in range(lowercase__ )]
__lowercase = [[0] for _ in range(lowercase__ )]
__lowercase = 42
__lowercase = 42
__lowercase = 42
__lowercase = 42
for x_val, y_val in enumerate(lowercase__ ):
for col in range(lowercase__ ):
__lowercase = (x_val + 1) ** (size - col - 1)
__lowercase = y_val
__lowercase = solve(lowercase__ , lowercase__ )
def interpolated_func(lowercase__ ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(lowercase__ ) )
return interpolated_func
def UpperCAmelCase__ ( lowercase__ ) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def UpperCAmelCase__ ( lowercase__ = question_function , lowercase__ = 10 ) -> int:
__lowercase = [func(lowercase__ ) for x_val in range(1 , order + 1 )]
__lowercase = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
__lowercase = 0
__lowercase = 42
__lowercase = 42
for poly in polynomials:
__lowercase = 1
while func(lowercase__ ) == poly(lowercase__ ):
x_val += 1
ret += poly(lowercase__ )
return ret
if __name__ == "__main__":
print(F"""{solution() = }""")
| 634 | 0 |
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def UpperCAmelCase__ ( lowercase__ ) -> Tuple:
return EnvironmentCommand()
def UpperCAmelCase__ ( lowercase__ ) -> Union[str, Any]:
return EnvironmentCommand(args.accelerate_config_file )
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
@staticmethod
def snake_case__ ( lowercase : ArgumentParser ) -> Any:
"""simple docstring"""
__lowercase = parser.add_parser("""env""" )
download_parser.set_defaults(func=lowercase )
download_parser.add_argument(
"""--accelerate-config_file""" , default=lowercase , help="""The accelerate config file to use for the default values in the launching script.""" , )
download_parser.set_defaults(func=lowercase )
def __init__( self : Dict , lowercase : Tuple , *lowercase : Optional[int] ) -> None:
"""simple docstring"""
__lowercase = accelerate_config_file
def snake_case__ ( self : List[Any] ) -> Any:
"""simple docstring"""
__lowercase = """not installed"""
if is_safetensors_available():
import safetensors
__lowercase = safetensors.__version__
elif importlib.util.find_spec("""safetensors""" ) is not None:
import safetensors
__lowercase = F"{safetensors.__version__} but is ignored because of PyTorch version too old."
__lowercase = """not installed"""
__lowercase = __lowercase = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
__lowercase = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(lowercase ):
__lowercase = load_config_from_file(self._accelerate_config_file ).to_dict()
__lowercase = (
"""\n""".join([F"\t- {prop}: {val}" for prop, val in accelerate_config.items()] )
if isinstance(lowercase , lowercase )
else F"\t{accelerate_config}"
)
__lowercase = """not installed"""
__lowercase = """NA"""
if is_torch_available():
import torch
__lowercase = torch.__version__
__lowercase = torch.cuda.is_available()
__lowercase = """not installed"""
__lowercase = """NA"""
if is_tf_available():
import tensorflow as tf
__lowercase = tf.__version__
try:
# deprecated in v2.1
__lowercase = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
__lowercase = bool(tf.config.list_physical_devices("""GPU""" ) )
__lowercase = """not installed"""
__lowercase = """not installed"""
__lowercase = """not installed"""
__lowercase = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
__lowercase = flax.__version__
__lowercase = jax.__version__
__lowercase = jaxlib.__version__
__lowercase = jax.lib.xla_bridge.get_backend().platform
__lowercase = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": F"{safetensors_version}",
"""Accelerate version""": F"{accelerate_version}",
"""Accelerate config""": F"{accelerate_config_str}",
"""PyTorch version (GPU?)""": F"{pt_version} ({pt_cuda_available})",
"""Tensorflow version (GPU?)""": F"{tf_version} ({tf_cuda_available})",
"""Flax version (CPU?/GPU?/TPU?)""": F"{flax_version} ({jax_backend})",
"""Jax version""": F"{jax_version}",
"""JaxLib version""": F"{jaxlib_version}",
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(lowercase ) )
return info
@staticmethod
def snake_case__ ( lowercase : str ) -> str:
"""simple docstring"""
return "\n".join([F"- {prop}: {val}" for prop, val in d.items()] ) + "\n"
| 708 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase__ = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 634 | 0 |
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ ) -> List[str]:
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(lowercase__ , n - 1 , lowercase__ ) * a) % mod
else:
__lowercase = binary_exponentiation(lowercase__ , n / 2 , lowercase__ )
return (b * b) % mod
# a prime number
UpperCamelCase__ = 7_01
UpperCamelCase__ = 10_00_00_00_00
UpperCamelCase__ = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 709 |
import unittest
import numpy as np
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , ) -> np.ndarray:
__lowercase = np.shape(lowercase__ )
__lowercase = np.shape(lowercase__ )
__lowercase = np.shape(lowercase__ )
if shape_a[0] != shape_b[0]:
__lowercase = (
"""Expected the same number of rows for A and B. """
F"Instead found A of size {shape_a} and B of size {shape_b}"
)
raise ValueError(lowercase__ )
if shape_b[1] != shape_c[1]:
__lowercase = (
"""Expected the same number of columns for B and C. """
F"Instead found B of size {shape_b} and C of size {shape_c}"
)
raise ValueError(lowercase__ )
__lowercase = pseudo_inv
if a_inv is None:
try:
__lowercase = np.linalg.inv(lowercase__ )
except np.linalg.LinAlgError:
raise ValueError(
"""Input matrix A is not invertible. Cannot compute Schur complement.""" )
return mat_c - mat_b.T @ a_inv @ mat_b
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : Dict ) -> None:
"""simple docstring"""
__lowercase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
__lowercase = np.array([[0, 3], [3, 0], [2, 3]] )
__lowercase = np.array([[2, 1], [6, 3]] )
__lowercase = schur_complement(lowercase , lowercase , lowercase )
__lowercase = np.block([[a, b], [b.T, c]] )
__lowercase = np.linalg.det(lowercase )
__lowercase = np.linalg.det(lowercase )
__lowercase = np.linalg.det(lowercase )
self.assertAlmostEqual(lowercase , det_a * det_s )
def snake_case__ ( self : Tuple ) -> None:
"""simple docstring"""
__lowercase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
__lowercase = np.array([[0, 3], [3, 0], [2, 3]] )
__lowercase = np.array([[2, 1], [6, 3]] )
with self.assertRaises(lowercase ):
schur_complement(lowercase , lowercase , lowercase )
def snake_case__ ( self : Tuple ) -> None:
"""simple docstring"""
__lowercase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
__lowercase = np.array([[0, 3], [3, 0], [2, 3]] )
__lowercase = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(lowercase ):
schur_complement(lowercase , lowercase , lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 634 | 0 |
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
def UpperCAmelCase__ ( lowercase__ ) -> Union[str, Any]:
__lowercase = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
__lowercase = 128
elif "12-12" in model_name:
__lowercase = 12
__lowercase = 12
elif "14-14" in model_name:
__lowercase = 14
__lowercase = 14
elif "16-16" in model_name:
__lowercase = 16
__lowercase = 16
else:
raise ValueError("""Model not supported""" )
__lowercase = """huggingface/label-files"""
if "speech-commands" in model_name:
__lowercase = 35
__lowercase = """speech-commands-v2-id2label.json"""
else:
__lowercase = 527
__lowercase = """audioset-id2label.json"""
__lowercase = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="""dataset""" ) , """r""" ) )
__lowercase = {int(lowercase__ ): v for k, v in idalabel.items()}
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase__ ( lowercase__ ) -> Union[str, Any]:
if "module.v" in name:
__lowercase = name.replace("""module.v""" , """audio_spectrogram_transformer""" )
if "cls_token" in name:
__lowercase = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "dist_token" in name:
__lowercase = name.replace("""dist_token""" , """embeddings.distillation_token""" )
if "pos_embed" in name:
__lowercase = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
__lowercase = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
# transformer blocks
if "blocks" in name:
__lowercase = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
__lowercase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
__lowercase = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
__lowercase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__lowercase = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__lowercase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__lowercase = name.replace("""mlp.fc2""" , """output.dense""" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
__lowercase = name.replace("""audio_spectrogram_transformer.norm""" , """audio_spectrogram_transformer.layernorm""" )
# classifier head
if "module.mlp_head.0" in name:
__lowercase = name.replace("""module.mlp_head.0""" , """classifier.layernorm""" )
if "module.mlp_head.1" in name:
__lowercase = name.replace("""module.mlp_head.1""" , """classifier.dense""" )
return name
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> Any:
for key in orig_state_dict.copy().keys():
__lowercase = orig_state_dict.pop(lowercase__ )
if "qkv" in key:
__lowercase = key.split(""".""" )
__lowercase = int(key_split[3] )
__lowercase = config.hidden_size
if "weight" in key:
__lowercase = val[:dim, :]
__lowercase = val[dim : dim * 2, :]
__lowercase = val[-dim:, :]
else:
__lowercase = val[:dim]
__lowercase = val[dim : dim * 2]
__lowercase = val[-dim:]
else:
__lowercase = val
return orig_state_dict
def UpperCAmelCase__ ( lowercase__ ) -> List[str]:
__lowercase = [
"""module.v.head.weight""",
"""module.v.head.bias""",
"""module.v.head_dist.weight""",
"""module.v.head_dist.bias""",
]
for k in ignore_keys:
state_dict.pop(lowercase__ , lowercase__ )
@torch.no_grad()
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__=False ) -> str:
__lowercase = get_audio_spectrogram_transformer_config(lowercase__ )
__lowercase = {
"""ast-finetuned-audioset-10-10-0.4593""": (
"""https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.450""": (
"""https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448""": (
"""https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448-v2""": (
"""https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
),
"""ast-finetuned-audioset-12-12-0.447""": (
"""https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
),
"""ast-finetuned-audioset-14-14-0.443""": (
"""https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
),
"""ast-finetuned-audioset-16-16-0.442""": (
"""https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
),
"""ast-finetuned-speech-commands-v2""": (
"""https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
),
}
# load original state_dict
__lowercase = model_name_to_url[model_name]
__lowercase = torch.hub.load_state_dict_from_url(lowercase__ , map_location="""cpu""" )
# remove some keys
remove_keys(lowercase__ )
# rename some keys
__lowercase = convert_state_dict(lowercase__ , lowercase__ )
# load 🤗 model
__lowercase = ASTForAudioClassification(lowercase__ )
model.eval()
model.load_state_dict(lowercase__ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
__lowercase = -4.2677393 if """speech-commands""" not in model_name else -6.845978
__lowercase = 4.5689974 if """speech-commands""" not in model_name else 5.5654526
__lowercase = 1_024 if """speech-commands""" not in model_name else 128
__lowercase = ASTFeatureExtractor(mean=lowercase__ , std=lowercase__ , max_length=lowercase__ )
if "speech-commands" in model_name:
__lowercase = load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" )
__lowercase = dataset[0]["""audio"""]["""array"""]
else:
__lowercase = hf_hub_download(
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , )
__lowercase , __lowercase = torchaudio.load(lowercase__ )
__lowercase = waveform.squeeze().numpy()
__lowercase = feature_extractor(lowercase__ , sampling_rate=16_000 , return_tensors="""pt""" )
# forward pass
__lowercase = model(**lowercase__ )
__lowercase = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
__lowercase = torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
__lowercase = torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
__lowercase = torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
__lowercase = torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
__lowercase = torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
__lowercase = torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
__lowercase = torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
__lowercase = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError("""Unknown model name""" )
if not torch.allclose(logits[0, :3] , lowercase__ , atol=1E-4 ):
raise ValueError("""Logits don't match""" )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowercase__ )
print(F"Saving feature extractor to {pytorch_dump_folder_path}" )
feature_extractor.save_pretrained(lowercase__ )
if push_to_hub:
print("""Pushing model and feature extractor to the hub...""" )
model.push_to_hub(F"MIT/{model_name}" )
feature_extractor.push_to_hub(F"MIT/{model_name}" )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="ast-finetuned-audioset-10-10-0.4593",
type=str,
help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
UpperCamelCase__ = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 710 |
import random
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ = False ) -> dict:
__lowercase = {i: [] for i in range(lowercase__ )}
# if probability is greater than or equal to 1, generate a complete graph
if probability >= 1:
return complete_graph(lowercase__ )
# if probability is lower than or equal to 0, return a graph without edges
if probability <= 0:
return graph
# for each pair of nodes (i, j), add an edge from i to j
# if the randomly generated number is lower than probability
for i in range(lowercase__ ):
for j in range(i + 1 , lowercase__ ):
if random.random() < probability:
graph[i].append(lowercase__ )
if not directed:
# if the graph is undirected, also add the reverse edge from j to i
graph[j].append(lowercase__ )
return graph
def UpperCAmelCase__ ( lowercase__ ) -> dict:
return {
i: [j for j in range(lowercase__ ) if i != j] for i in range(lowercase__ )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 634 | 0 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
# TODO Update this
UpperCamelCase__ = {
"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Union[str, Any] = """esm"""
def __init__( self : Any , lowercase : Optional[Any]=None , lowercase : Optional[int]=None , lowercase : List[Any]=None , lowercase : Optional[int]=768 , lowercase : str=12 , lowercase : Union[str, Any]=12 , lowercase : Dict=3_072 , lowercase : Optional[int]=0.1 , lowercase : str=0.1 , lowercase : Dict=1_026 , lowercase : Tuple=0.02 , lowercase : str=1E-1_2 , lowercase : Dict="absolute" , lowercase : Optional[Any]=True , lowercase : int=None , lowercase : int=False , lowercase : List[str]=False , lowercase : Tuple=None , lowercase : Tuple=None , **lowercase : Union[str, Any] , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=lowercase , mask_token_id=lowercase , **lowercase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
__lowercase = use_cache
__lowercase = emb_layer_norm_before
__lowercase = token_dropout
__lowercase = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
__lowercase = EsmFoldConfig()
elif isinstance(lowercase , lowercase ):
__lowercase = EsmFoldConfig(**lowercase )
__lowercase = esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
__lowercase = get_default_vocab_list()
else:
__lowercase = vocab_list
else:
__lowercase = None
__lowercase = None
if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , lowercase ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def snake_case__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = super().to_dict()
if isinstance(self.esmfold_config , lowercase ):
__lowercase = self.esmfold_config.to_dict()
return output
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : str = None
lowercase__ : bool = True
lowercase__ : bool = False
lowercase__ : bool = False
lowercase__ : bool = False
lowercase__ : float = 0
lowercase__ : bool = True
lowercase__ : bool = False
lowercase__ : int = 128
lowercase__ : "TrunkConfig" = None
def snake_case__ ( self : List[str] ) -> Any:
"""simple docstring"""
if self.trunk is None:
__lowercase = TrunkConfig()
elif isinstance(self.trunk , lowercase ):
__lowercase = TrunkConfig(**self.trunk )
def snake_case__ ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase = asdict(self )
__lowercase = self.trunk.to_dict()
return output
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : int = 48
lowercase__ : int = 1_024
lowercase__ : int = 128
lowercase__ : int = 32
lowercase__ : int = 32
lowercase__ : int = 32
lowercase__ : float = 0
lowercase__ : float = 0
lowercase__ : bool = False
lowercase__ : int = 4
lowercase__ : Optional[int] = 128
lowercase__ : "StructureModuleConfig" = None
def snake_case__ ( self : Tuple ) -> str:
"""simple docstring"""
if self.structure_module is None:
__lowercase = StructureModuleConfig()
elif isinstance(self.structure_module , lowercase ):
__lowercase = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"`max_recycles` should be positive, got {self.max_recycles}." )
if self.sequence_state_dim % self.sequence_state_dim != 0:
raise ValueError(
"""`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"""
F" {self.sequence_state_dim} and {self.sequence_state_dim}." )
if self.pairwise_state_dim % self.pairwise_state_dim != 0:
raise ValueError(
"""`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"""
F" {self.pairwise_state_dim} and {self.pairwise_state_dim}." )
__lowercase = self.sequence_state_dim // self.sequence_head_width
__lowercase = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
F" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
F" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
if self.dropout >= 0.4:
raise ValueError(F"`dropout` should not be greater than 0.4, got {self.dropout}." )
def snake_case__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = asdict(self )
__lowercase = self.structure_module.to_dict()
return output
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : int = 384
lowercase__ : int = 128
lowercase__ : int = 16
lowercase__ : int = 128
lowercase__ : int = 12
lowercase__ : int = 4
lowercase__ : int = 8
lowercase__ : float = 0.1
lowercase__ : int = 8
lowercase__ : int = 1
lowercase__ : int = 2
lowercase__ : int = 7
lowercase__ : int = 10
lowercase__ : float = 1E-8
lowercase__ : float = 1E5
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
return asdict(self )
def UpperCAmelCase__ ( ) -> List[Any]:
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 711 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
UpperCamelCase__ = random.Random()
def UpperCAmelCase__ ( lowercase__ , lowercase__=1.0 , lowercase__=None , lowercase__=None ) -> str:
if rng is None:
__lowercase = global_rng
__lowercase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[Any] , lowercase : Tuple , lowercase : Union[str, Any]=7 , lowercase : List[Any]=400 , lowercase : Any=2_000 , lowercase : Optional[int]=24 , lowercase : Any=24 , lowercase : List[str]=0.0 , lowercase : Dict=16_000 , lowercase : Union[str, Any]=True , lowercase : Dict=True , ) -> Any:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = min_seq_length
__lowercase = max_seq_length
__lowercase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__lowercase = feature_size
__lowercase = num_mel_bins
__lowercase = padding_value
__lowercase = sampling_rate
__lowercase = return_attention_mask
__lowercase = do_normalize
def snake_case__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case__ ( self : List[str] , lowercase : Tuple=False , lowercase : int=False ) -> Optional[Any]:
"""simple docstring"""
def _flatten(lowercase : Optional[Any] ):
return list(itertools.chain(*lowercase ) )
if equal_length:
__lowercase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__lowercase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__lowercase = [np.asarray(lowercase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _lowerCAmelCase ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : int = SpeechaTextFeatureExtractor if is_speech_available() else None
def snake_case__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase = SpeechaTextFeatureExtractionTester(self )
def snake_case__ ( self : Tuple , lowercase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
self.assertTrue(np.all(np.mean(lowercase , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowercase , axis=0 ) - 1 ) < 1E-3 ) )
def snake_case__ ( self : List[Any] ) -> str:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = [np.asarray(lowercase ) for speech_input in speech_inputs]
# Test feature size
__lowercase = feature_extractor(lowercase , padding=lowercase , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
__lowercase = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
__lowercase = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-3 ) )
# Test batched
__lowercase = feature_extractor(lowercase , return_tensors="""np""" ).input_features
__lowercase = feature_extractor(lowercase , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(lowercase , lowercase ):
self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__lowercase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__lowercase = np.asarray(lowercase )
__lowercase = feature_extractor(lowercase , return_tensors="""np""" ).input_features
__lowercase = feature_extractor(lowercase , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(lowercase , lowercase ):
self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-3 ) )
def snake_case__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = ["""longest""", """max_length""", """do_not_pad"""]
__lowercase = [None, 16, None]
for max_length, padding in zip(lowercase , lowercase ):
__lowercase = feature_extractor(
lowercase , padding=lowercase , max_length=lowercase , return_attention_mask=lowercase )
__lowercase = inputs.input_features
__lowercase = inputs.attention_mask
__lowercase = [np.sum(lowercase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def snake_case__ ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = ["""longest""", """max_length""", """do_not_pad"""]
__lowercase = [None, 16, None]
for max_length, padding in zip(lowercase , lowercase ):
__lowercase = feature_extractor(
lowercase , max_length=lowercase , padding=lowercase , return_tensors="""np""" , return_attention_mask=lowercase )
__lowercase = inputs.input_features
__lowercase = inputs.attention_mask
__lowercase = [np.sum(lowercase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def snake_case__ ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = feature_extractor(
lowercase , padding="""max_length""" , max_length=4 , truncation=lowercase , return_tensors="""np""" , return_attention_mask=lowercase , )
__lowercase = inputs.input_features
__lowercase = inputs.attention_mask
__lowercase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = feature_extractor(
lowercase , padding="""longest""" , max_length=4 , truncation=lowercase , return_tensors="""np""" , return_attention_mask=lowercase , )
__lowercase = inputs.input_features
__lowercase = inputs.attention_mask
__lowercase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = feature_extractor(
lowercase , padding="""longest""" , max_length=16 , truncation=lowercase , return_tensors="""np""" , return_attention_mask=lowercase , )
__lowercase = inputs.input_features
__lowercase = inputs.attention_mask
__lowercase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
def snake_case__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
import torch
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = np.random.rand(100 , 32 ).astype(np.floataa )
__lowercase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__lowercase = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__lowercase = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def snake_case__ ( self : Optional[int] , lowercase : Union[str, Any] ) -> int:
"""simple docstring"""
from datasets import load_dataset
__lowercase = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
__lowercase = ds.sort("""id""" ).select(range(lowercase ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def snake_case__ ( self : str ) -> Any:
"""simple docstring"""
# fmt: off
__lowercase = np.array([
-1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
-1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
-1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
] )
# fmt: on
__lowercase = self._load_datasamples(1 )
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = feature_extractor(lowercase , return_tensors="""pt""" ).input_features
self.assertEquals(input_features.shape , (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] , lowercase , atol=1E-4 ) )
| 634 | 0 |
def UpperCAmelCase__ ( lowercase__ = 100 ) -> int:
__lowercase = n * (n + 1) * (2 * n + 1) / 6
__lowercase = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 712 |
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ ) -> float:
__lowercase = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def UpperCAmelCase__ ( ) -> List[str]:
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 634 | 0 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def UpperCAmelCase__ ( lowercase__ ) -> Optional[int]:
monkeypatch.setattr("""datasets.utils.deprecation_utils._emitted_deprecation_warnings""" , set() )
@pytest.fixture
def UpperCAmelCase__ ( lowercase__ ) -> Dict:
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Any , lowercase : str ) -> List[str]:
"""simple docstring"""
__lowercase = metric_id
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : List[str] = [MetricMock(_UpperCAmelCase ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]]
def snake_case__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
return self._metrics
monkeypatch.setattr("""datasets.inspect.huggingface_hub""" , HfhMock() )
@pytest.mark.parametrize(
"""func, args""" , [(load_metric, ("""metrics/mse""",)), (list_metrics, ()), (inspect_metric, ("""metrics/mse""", """tmp_path"""))] )
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Optional[int]:
if "tmp_path" in args:
__lowercase = tuple(arg if arg != """tmp_path""" else tmp_path for arg in args )
with pytest.warns(lowercase__ , match="""https://huggingface.co/docs/evaluate""" ):
func(*lowercase__ )
| 713 |
def UpperCAmelCase__ ( lowercase__ = 100 ) -> int:
__lowercase = n * (n + 1) * (2 * n + 1) / 6
__lowercase = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 634 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Union[str, Any] = """yolos"""
def __init__( self : Optional[int] , lowercase : Any=768 , lowercase : Tuple=12 , lowercase : Tuple=12 , lowercase : str=3_072 , lowercase : Optional[Any]="gelu" , lowercase : Union[str, Any]=0.0 , lowercase : Dict=0.0 , lowercase : Optional[int]=0.02 , lowercase : Optional[Any]=1E-1_2 , lowercase : Tuple=[512, 864] , lowercase : Optional[int]=16 , lowercase : Dict=3 , lowercase : Optional[Any]=True , lowercase : Optional[int]=100 , lowercase : Optional[int]=True , lowercase : Any=False , lowercase : Any=1 , lowercase : Any=5 , lowercase : List[str]=2 , lowercase : Union[str, Any]=5 , lowercase : str=2 , lowercase : Tuple=0.1 , **lowercase : str , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowercase )
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = qkv_bias
__lowercase = num_detection_tokens
__lowercase = use_mid_position_embeddings
__lowercase = auxiliary_loss
# Hungarian matcher
__lowercase = class_cost
__lowercase = bbox_cost
__lowercase = giou_cost
# Loss coefficients
__lowercase = bbox_loss_coefficient
__lowercase = giou_loss_coefficient
__lowercase = eos_coefficient
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Dict = version.parse("""1.11""" )
@property
def snake_case__ ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def snake_case__ ( self : int ) -> float:
"""simple docstring"""
return 1E-4
@property
def snake_case__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
return 12
| 714 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
UpperCamelCase__ = datasets.logging.get_logger(__name__)
UpperCamelCase__ = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
UpperCamelCase__ = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
UpperCamelCase__ = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__=False , lowercase__=False , lowercase__=True , lowercase__=False , lowercase__="dummy_doc" ) -> str:
__lowercase = {doc: key_lines}
__lowercase = {doc: sys_lines}
__lowercase = {}
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase , __lowercase = reader.get_doc_mentions(lowercase__ , key_doc_lines[doc] , lowercase__ )
key_singletons_num += singletons_num
if NP_only or min_span:
__lowercase = reader.set_annotated_parse_trees(lowercase__ , key_doc_lines[doc] , lowercase__ , lowercase__ )
__lowercase , __lowercase = reader.get_doc_mentions(lowercase__ , sys_doc_lines[doc] , lowercase__ )
sys_singletons_num += singletons_num
if NP_only or min_span:
__lowercase = reader.set_annotated_parse_trees(lowercase__ , key_doc_lines[doc] , lowercase__ , lowercase__ )
if remove_nested:
__lowercase , __lowercase = reader.remove_nested_coref_mentions(lowercase__ , lowercase__ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
__lowercase , __lowercase = reader.remove_nested_coref_mentions(lowercase__ , lowercase__ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
__lowercase = reader.get_mention_assignments(lowercase__ , lowercase__ )
__lowercase = reader.get_mention_assignments(lowercase__ , lowercase__ )
__lowercase = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
logger.info(
"""Number of resulting singleton clusters in the key """
F"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
if not keep_singletons:
logger.info(
F"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
"""files, respectively""" )
return doc_coref_infos
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Optional[Any]:
__lowercase = get_coref_infos(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
__lowercase = {}
__lowercase = 0
__lowercase = 0
for name, metric in metrics:
__lowercase , __lowercase , __lowercase = evaluator.evaluate_documents(lowercase__ , lowercase__ , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F"{name}/recall": recall, F"{name}/precision": precision, F"{name}/f1": fa} )
logger.info(
name.ljust(10 ) , F"Recall: {recall * 100:.2f}" , F" Precision: {precision * 100:.2f}" , F" F1: {fa * 100:.2f}" , )
if conll_subparts_num == 3:
__lowercase = (conll / 3) * 100
logger.info(F"CoNLL score: {conll:.2f}" )
output_scores.update({"""conll_score""": conll} )
return output_scores
def UpperCAmelCase__ ( lowercase__ ) -> List[Any]:
__lowercase = False
for line in key_lines:
if not line.startswith("""#""" ):
if len(line.split() ) > 6:
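# column 6 of a CoNLL line (index 5 after splitting) is the parse bit; any value other than "-" indicates gold parse annotation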
__lowercase = line.split()[5]
if parse_col != "-":
__lowercase = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def snake_case__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def snake_case__ ( self : Tuple , lowercase : Dict , lowercase : Optional[int] , lowercase : Dict=True , lowercase : List[str]=False , lowercase : int=False , lowercase : Dict=False ) -> str:
"""simple docstring"""
__lowercase = [
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
__lowercase = util.check_gold_parse_annotation(lowercase )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
__lowercase = evaluate(
key_lines=lowercase , sys_lines=lowercase , metrics=lowercase , NP_only=lowercase , remove_nested=lowercase , keep_singletons=lowercase , min_span=lowercase , )
return score
| 634 | 0 |
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class _lowerCAmelCase ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : int = WavaVecaPhonemeCTCTokenizer
lowercase__ : Optional[int] = False
def snake_case__ ( self : str ) -> int:
"""simple docstring"""
super().setUp()
__lowercase = (
"""<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː """
"""ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː """
"""ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 """
"""oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ """
"""pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ """
"""yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ """
"""əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ """
"""ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ """
"""ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ """
"""uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ """
"""ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ """
"""ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ """
"""ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"""
).split(""" """ )
__lowercase = dict(zip(lowercase , range(len(lowercase ) ) ) )
__lowercase = {"""pad_token""": """<pad>""", """unk_token""": """<unk>""", """bos_token""": """<s>""", """eos_token""": """</s>"""}
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowercase ) + """\n""" )
def snake_case__ ( self : List[Any] , lowercase : Optional[Any] , lowercase : List[str]=False , lowercase : List[str]=20 , lowercase : str=5 ) -> Tuple[str, list]:
"""simple docstring"""
__lowercase = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase )) for i in range(len(lowercase ) )]
__lowercase = list(filter(lambda lowercase : [t[0]] == tokenizer.encode(t[1] , do_phonemize=lowercase ) , lowercase ) )
if max_length is not None and len(lowercase ) > max_length:
__lowercase = toks[:max_length]
if min_length is not None and len(lowercase ) < min_length and len(lowercase ) > 0:
while len(lowercase ) < min_length:
__lowercase = toks + toks
# toks_str = [t[1] for t in toks]
__lowercase = [t[0] for t in toks]
# Ensure consistency
__lowercase = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase )
if " " not in output_txt and len(lowercase ) > 1:
__lowercase = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase )
)
if with_prefix_space:
__lowercase = """ """ + output_txt
__lowercase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
return output_txt, output_ids
def snake_case__ ( self : Tuple , **lowercase : int ) -> Union[str, Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **lowercase )
def snake_case__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
# check adding a single token
tokenizer.add_tokens("""xxx""" )
__lowercase = tokenizer("""m xxx ɪ""" , do_phonemize=lowercase ).input_ids
self.assertEqual(lowercase , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(["""aaa""", """bbb""", """ccc"""] )
__lowercase = tokenizer("""m aaa ɪ ccc""" , do_phonemize=lowercase ).input_ids
self.assertEqual(lowercase , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
__lowercase = tokenizer("""maɪ c""" , do_phonemize=lowercase ).input_ids
self.assertEqual(lowercase , [3, 200] ) # mai should be <unk> (=3)
def snake_case__ ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
self.assertEqual(lowercase , """h ə l oʊ h aʊ ɑːɹ j uː""" )
def snake_case__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(lowercase ).input_ids , tokenizer(lowercase , do_phonemize=lowercase ).input_ids )
def snake_case__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
__lowercase = tokenizer.decode(tokenizer(lowercase ).input_ids )
self.assertEqual(lowercase , lowercase )
def snake_case__ ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
__lowercase = tokenizer.decode(sample_ids[0] )
__lowercase = tokenizer.batch_decode(lowercase )
self.assertEqual(lowercase , batch_tokens[0] )
self.assertEqual(lowercase , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
def snake_case__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
self.assertEqual(lowercase , """h ə l oʊ | h aʊ | ɑːɹ | j uː |""" )
def snake_case__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(lowercase ).input_ids , tokenizer(lowercase , do_phonemize=lowercase ).input_ids )
def snake_case__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
__lowercase = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
__lowercase = tokenizer.decode(sample_ids[0] )
__lowercase = tokenizer.batch_decode(lowercase )
self.assertEqual(lowercase , batch_tokens[0] )
self.assertEqual(lowercase , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
# decode with no word_del_token filter
__lowercase = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=lowercase )
__lowercase = tokenizer.batch_decode(lowercase , filter_word_delimiter_token=lowercase )
self.assertEqual(lowercase , batch_tokens[0] )
self.assertEqual(lowercase , ["""k s ɾ | ɾ l | ɭʲ""", """| j ð | s j ð s oːɹ"""] )
def snake_case__ ( self : int ) -> str:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
__lowercase = tokenizer.decode(tokenizer(lowercase ).input_ids , filter_word_delimiter_token=lowercase )
self.assertEqual(lowercase , lowercase )
def snake_case__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
__lowercase = tokenizer.decode(tokenizer(lowercase ).input_ids , filter_word_delimiter_token=lowercase )
self.assertEqual(""" """.join([p.strip() for p in phonemes.split(""" |""" )] ).strip() , lowercase )
def snake_case__ ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token=lowercase )
__lowercase = """Hello how are you"""
__lowercase = tokenizer(lowercase , phonemizer_lang="""en-us""" ).input_ids
__lowercase = tokenizer(lowercase , phonemizer_lang="""fr-fr""" ).input_ids
self.assertNotEqual(lowercase , lowercase )
__lowercase = tokenizer.decode(lowercase )
__lowercase = tokenizer.decode(lowercase )
self.assertEqual(lowercase , """h ə l oʊ h aʊ ɑːɹ j uː""" )
self.assertEqual(lowercase , """ɛ l o h aʊ a ʁ j u""" )
def snake_case__ ( self : int ) -> int:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = """Hello how Are you"""
__lowercase = """hello how are you"""
__lowercase = tokenizer(lowercase ).input_ids
__lowercase = tokenizer(lowercase ).input_ids
self.assertEqual(lowercase , lowercase )
def snake_case__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
tokenizer.add_tokens(["""!""", """?"""] )
tokenizer.add_special_tokens({"""cls_token""": """$$$"""} )
# fmt: off
__lowercase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
__lowercase = tokenizer.batch_decode(lowercase )
self.assertEqual(lowercase , ["""k s ɾ ɾ l ɭʲ!?!? $$$""", """j ð s j ð s oːɹ $$$"""] )
@staticmethod
def snake_case__ ( lowercase : List[str] , lowercase : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase = [d[key] for d in offsets]
return retrieved_list
def snake_case__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.get_tokenizer(word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ
__lowercase = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
__lowercase = tokenizer.decode(lowercase , output_char_offsets=lowercase , filter_word_delimiter_token=lowercase )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""char_offsets""" in outputs )
self.assertTrue(isinstance(lowercase , lowercase ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) , ["""k""", """s""", """ɾ""", """ɾ""", """|""", """ɾ""", """l""", """|""", """ɭʲ"""] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """start_offset""" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """end_offset""" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def snake_case__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase = self.get_tokenizer(word_delimiter_token="""|""" )
def check_list_tuples_equal(lowercase : List[str] , lowercase : Dict ):
self.assertTrue(isinstance(lowercase , lowercase ) )
self.assertTrue(isinstance(outputs_list[0] , lowercase ) )
# transform list to ModelOutput
__lowercase = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch["""text"""] , outputs_batch_a["""text"""] )
def recursive_check(lowercase : List[Any] , lowercase : Optional[int] ):
if isinstance(lowercase , lowercase ):
[recursive_check(lowercase , lowercase ) for la, la in zip(lowercase , lowercase )]
self.assertEqual(lowercase , lowercase )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["""char_offsets"""] , outputs_batch_a["""char_offsets"""] )
# fmt: off
__lowercase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
__lowercase = tokenizer.batch_decode(lowercase , output_char_offsets=lowercase )
__lowercase = [tokenizer.decode(lowercase , output_char_offsets=lowercase ) for ids in sample_ids]
check_list_tuples_equal(lowercase , lowercase )
@unittest.skip("""Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes""" )
def snake_case__ ( self : List[Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Wav2Vec2PhonemeTokenizer always puts spaces between phonemes""" )
def snake_case__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip("""encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency""" )
def snake_case__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Wav2Vec2PhonemeModel has no max model length => no testing""" )
def snake_case__ ( self : str ) -> int:
"""simple docstring"""
pass
def snake_case__ ( self : Any ) -> int:
"""simple docstring"""
__lowercase = self.get_tokenizers(do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
__lowercase = tokenizer.vocab_size
__lowercase = len(lowercase )
self.assertNotEqual(lowercase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__lowercase = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
__lowercase = tokenizer.add_tokens(lowercase )
__lowercase = tokenizer.vocab_size
__lowercase = len(lowercase )
self.assertNotEqual(lowercase , 0 )
self.assertEqual(lowercase , lowercase )
self.assertEqual(lowercase , len(lowercase ) )
self.assertEqual(lowercase , all_size + len(lowercase ) )
__lowercase = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=lowercase )
self.assertGreaterEqual(len(lowercase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
__lowercase = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
__lowercase = tokenizer.add_special_tokens(lowercase )
__lowercase = tokenizer.vocab_size
__lowercase = len(lowercase )
self.assertNotEqual(lowercase , 0 )
self.assertEqual(lowercase , lowercase )
self.assertEqual(lowercase , len(lowercase ) )
self.assertEqual(lowercase , all_size_a + len(lowercase ) )
__lowercase = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=lowercase )
self.assertGreaterEqual(len(lowercase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def snake_case__ ( self : str ) -> int:
"""simple docstring"""
pass
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def snake_case__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
def snake_case__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.get_tokenizers(fast=lowercase , do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
__lowercase = ["""ð""", """ɪ""", """s""", """ɪ""", """z""", """ɐ""", """t""", """ɛ""", """k""", """s""", """t"""]
__lowercase = tokenizer.convert_tokens_to_string(lowercase )
self.assertIsInstance(output["""text"""] , lowercase )
| 715 |
UpperCamelCase__ = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}
def UpperCAmelCase__ ( from_type : str , to_type : str , value : float ) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            F"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            F"Valid values are: {', '.join(ENERGY_CONVERSION )}"
        )
        raise ValueError(msg )
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
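# Usage sketch (illustrative): converting 1 joule to kilojoule multiplies by ENERGY_CONVERSION["joule"] (1.0) and divides by ENERGY_CONVERSION["kilojoule"] (1000), giving 0.001.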
if __name__ == "__main__":
import doctest
doctest.testmod()
| 634 | 0 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowerCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : int = IFImgaImgSuperResolutionPipeline
lowercase__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""}
lowercase__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""} )
lowercase__ : Tuple = PipelineTesterMixin.required_optional_params - {"""latents"""}
def snake_case__ ( self : Tuple ) -> Any:
"""simple docstring"""
return self._get_superresolution_dummy_components()
def snake_case__ ( self : List[str] , lowercase : Optional[int] , lowercase : Optional[Any]=0 ) -> Union[str, Any]:
"""simple docstring"""
if str(lowercase ).startswith("""mps""" ):
__lowercase = torch.manual_seed(lowercase )
else:
__lowercase = torch.Generator(device=lowercase ).manual_seed(lowercase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase ) ).to(lowercase )
__lowercase = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowercase ) ).to(lowercase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def snake_case__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def snake_case__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def snake_case__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def snake_case__ ( self : Dict ) -> int:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def snake_case__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
self._test_save_load_local()
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 716 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase )
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : str = field(default="""summarization""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
lowercase__ : ClassVar[Features] = Features({"""text""": Value("""string""" )} )
lowercase__ : ClassVar[Features] = Features({"""summary""": Value("""string""" )} )
lowercase__ : str = "text"
lowercase__ : str = "summary"
@property
def snake_case__ ( self : List[Any] ) -> Dict[str, str]:
"""simple docstring"""
return {self.text_column: "text", self.summary_column: "summary"}
| 634 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Dict = """switch_transformers"""
lowercase__ : int = ["""past_key_values"""]
lowercase__ : Dict = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self : Any , lowercase : List[Any]=32_128 , lowercase : Optional[int]=768 , lowercase : Optional[int]=64 , lowercase : int=2_048 , lowercase : Union[str, Any]=64 , lowercase : List[Any]=12 , lowercase : Tuple=3 , lowercase : Tuple=12 , lowercase : Union[str, Any]=3 , lowercase : List[str]=12 , lowercase : int=8 , lowercase : Optional[Any]=False , lowercase : List[str]=0.01 , lowercase : str="float32" , lowercase : int=False , lowercase : Any=32 , lowercase : Optional[int]=128 , lowercase : Tuple=0.1 , lowercase : Union[str, Any]=1E-6 , lowercase : List[str]=0.001 , lowercase : Optional[Any]=0.001 , lowercase : Any=1.0 , lowercase : Dict="relu" , lowercase : List[Any]=True , lowercase : Any=False , lowercase : Optional[Any]=True , lowercase : Dict=0 , lowercase : List[Any]=1 , **lowercase : str , ) -> str:
"""simple docstring"""
__lowercase = vocab_size
__lowercase = d_model
__lowercase = d_kv
__lowercase = d_ff
__lowercase = num_sparse_encoder_layers
__lowercase = num_layers
__lowercase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__lowercase = num_sparse_decoder_layers
# This tells us how many encoder layers apart each sparse encoder layer is placed.
if self.num_sparse_encoder_layers > 0:
__lowercase = self.num_layers // self.num_sparse_encoder_layers
else:
__lowercase = self.num_layers # HACK: this will create 0 sparse layers
# This tells us how many decoder layers apart each sparse decoder layer is placed.
if self.num_sparse_decoder_layers > 0:
__lowercase = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
__lowercase = self.num_decoder_layers # HACK: this will create 0 sparse layers
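# Illustrative example: with num_layers = 12 and num_sparse_encoder_layers = 3, the encoder sparse step is 12 // 3 = 4, i.e. every fourth encoder layer is a sparse (expert) layer; the decoder step is derived analogously.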
__lowercase = num_heads
__lowercase = num_experts
__lowercase = expert_capacity
__lowercase = router_bias
__lowercase = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
__lowercase = router_dtype
__lowercase = router_ignore_padding_tokens
__lowercase = relative_attention_num_buckets
__lowercase = relative_attention_max_distance
__lowercase = dropout_rate
__lowercase = layer_norm_epsilon
__lowercase = initializer_factor
__lowercase = feed_forward_proj
__lowercase = use_cache
__lowercase = add_router_probs
__lowercase = router_z_loss_coef
__lowercase = router_aux_loss_coef
__lowercase = self.feed_forward_proj.split("""-""" )
__lowercase = act_info[-1]
__lowercase = act_info[0] == """gated"""
if len(lowercase ) > 1 and act_info[0] != "gated" or len(lowercase ) > 2:
raise ValueError(
F"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""'gated-gelu' or 'relu'""" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__lowercase = """gelu_new"""
super().__init__(
pad_token_id=lowercase , eos_token_id=lowercase , is_encoder_decoder=lowercase , **lowercase , )
| 717 |
def UpperCAmelCase__ ( arr ) -> int:
    # minimum difference between the two subset sums of a partition of `arr`,
    # found with a subset-sum dynamic programme over achievable sums
    n = len(arr )
    s = sum(arr )
    dp = [[False for x in range(s + 1 )] for y in range(n + 1 )]
    for i in range(1 , n + 1 ):
        dp[i][0] = True
    for i in range(1 , s + 1 ):
        dp[0][i] = False
    for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            # sum j is achievable with the first i items if it was achievable without item i ...
            dp[i][j] = dp[i - 1][j]
            # ... or if j - arr[i - 1] was achievable and item i is added
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
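# Illustrative check: for arr = [1, 6, 11, 5] the total is 23 and the largest achievable subset sum not exceeding 23 // 2 = 11 is 11, so the minimum difference is 23 - 2 * 11 = 1.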
| 634 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase__ = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase__ = {
"roberta-base": 5_12,
"roberta-large": 5_12,
"roberta-large-mnli": 5_12,
"distilroberta-base": 5_12,
"roberta-base-openai-detector": 5_12,
"roberta-large-openai-detector": 5_12,
}
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Dict = VOCAB_FILES_NAMES
lowercase__ : str = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Optional[Any] = ["""input_ids""", """attention_mask"""]
lowercase__ : Tuple = RobertaTokenizer
def __init__( self : Dict , lowercase : Optional[int]=None , lowercase : Union[str, Any]=None , lowercase : Tuple=None , lowercase : Optional[int]="replace" , lowercase : Union[str, Any]="<s>" , lowercase : Optional[Any]="</s>" , lowercase : Union[str, Any]="</s>" , lowercase : List[Any]="<s>" , lowercase : Optional[Any]="<unk>" , lowercase : List[Any]="<pad>" , lowercase : List[str]="<mask>" , lowercase : Optional[Any]=False , lowercase : Dict=True , **lowercase : str , ) -> Tuple:
"""simple docstring"""
super().__init__(
lowercase , lowercase , tokenizer_file=lowercase , errors=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase , **lowercase , )
__lowercase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , lowercase ) != add_prefix_space:
__lowercase = getattr(lowercase , pre_tok_state.pop("""type""" ) )
__lowercase = add_prefix_space
__lowercase = pre_tok_class(**lowercase )
__lowercase = add_prefix_space
__lowercase = """post_processor"""
__lowercase = getattr(self.backend_tokenizer , lowercase , lowercase )
if tokenizer_component_instance:
__lowercase = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
__lowercase = tuple(state["""sep"""] )
if "cls" in state:
__lowercase = tuple(state["""cls"""] )
__lowercase = False
if state.get("""add_prefix_space""" , lowercase ) != add_prefix_space:
__lowercase = add_prefix_space
__lowercase = True
if state.get("""trim_offsets""" , lowercase ) != trim_offsets:
__lowercase = trim_offsets
__lowercase = True
if changes_to_apply:
__lowercase = getattr(lowercase , state.pop("""type""" ) )
__lowercase = component_class(**lowercase )
setattr(self.backend_tokenizer , lowercase , lowercase )
@property
def snake_case__ ( self : Any ) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def snake_case__ ( self : Any , lowercase : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowercase = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else value
__lowercase = value
def snake_case__ ( self : List[Any] , *lowercase : Tuple , **lowercase : Dict ) -> BatchEncoding:
"""simple docstring"""
__lowercase = kwargs.get("""is_split_into_words""" , lowercase )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowercase , **lowercase )
def snake_case__ ( self : Any , *lowercase : int , **lowercase : Optional[int] ) -> BatchEncoding:
"""simple docstring"""
__lowercase = kwargs.get("""is_split_into_words""" , lowercase )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowercase , **lowercase )
def snake_case__ ( self : int , lowercase : str , lowercase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
__lowercase = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase )
def snake_case__ ( self : Tuple , lowercase : Dict , lowercase : Optional[Any]=None ) -> List[str]:
"""simple docstring"""
__lowercase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def snake_case__ ( self : Tuple , lowercase : List[int] , lowercase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 718 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , lowercase : str , lowercase : Union[str, Any]=13 , lowercase : Tuple=32 , lowercase : Optional[Any]=2 , lowercase : Tuple=3 , lowercase : Tuple=16 , lowercase : Tuple=[1, 2, 1] , lowercase : Optional[Any]=[2, 2, 4] , lowercase : Dict=2 , lowercase : Optional[int]=2.0 , lowercase : List[Any]=True , lowercase : str=0.0 , lowercase : Any=0.0 , lowercase : Optional[int]=0.1 , lowercase : int="gelu" , lowercase : Tuple=False , lowercase : Optional[Any]=True , lowercase : int=0.02 , lowercase : Union[str, Any]=1E-5 , lowercase : Dict=True , lowercase : Any=None , lowercase : str=True , lowercase : str=10 , lowercase : Dict=8 , lowercase : int=["stage1", "stage2", "stage3"] , lowercase : Optional[int]=[1, 2, 3] , ) -> Any:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = embed_dim
__lowercase = depths
__lowercase = num_heads
__lowercase = window_size
__lowercase = mlp_ratio
__lowercase = qkv_bias
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = drop_path_rate
__lowercase = hidden_act
__lowercase = use_absolute_embeddings
__lowercase = patch_norm
__lowercase = layer_norm_eps
__lowercase = initializer_range
__lowercase = is_training
__lowercase = scope
__lowercase = use_labels
__lowercase = type_sequence_label_size
__lowercase = encoder_stride
__lowercase = out_features
__lowercase = out_indices
def snake_case__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self : List[str] ) -> int:
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def snake_case__ ( self : Any , lowercase : List[Any] , lowercase : Optional[int] , lowercase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = MaskFormerSwinModel(config=lowercase )
model.to(lowercase )
model.eval()
__lowercase = model(lowercase )
__lowercase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__lowercase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
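# e.g. (illustrative, with the tester defaults image_size=32, patch_size=2, depths=[1, 2, 1], embed_dim=16): (32 // 2) ** 2 // 4 ** 2 = 16 expected tokens and 16 * 2 ** 2 = 64 expected hidden dims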
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def snake_case__ ( self : Any , lowercase : Tuple , lowercase : Any , lowercase : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowercase = MaskFormerSwinBackbone(config=lowercase )
model.to(lowercase )
model.eval()
__lowercase = model(lowercase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(lowercase ):
__lowercase = ["""stem"""]
__lowercase = MaskFormerSwinBackbone(config=lowercase )
def snake_case__ ( self : int ) -> Any:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : Optional[int] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowercase__ : List[str] = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
lowercase__ : List[str] = False
lowercase__ : int = False
lowercase__ : int = False
lowercase__ : Tuple = False
lowercase__ : Optional[Any] = False
def snake_case__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = MaskFormerSwinModelTester(self )
__lowercase = ConfigTester(self , config_class=lowercase , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def snake_case__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowercase )
@unittest.skip("""Swin does not use inputs_embeds""" )
def snake_case__ ( self : int ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def snake_case__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) )
def snake_case__ ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def snake_case__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple , lowercase : Tuple , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase , lowercase ) )
__lowercase = outputs.hidden_states
__lowercase = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowercase ) , lowercase )
# Swin has a different seq_length
__lowercase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def snake_case__ ( self : int ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , lowercase )
def snake_case__ ( self : int ) -> str:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = 3
__lowercase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__lowercase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowercase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__lowercase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def snake_case__ ( self : Any ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def snake_case__ ( self : List[str] ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def snake_case__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(t ):
t[t != t] = 0
return t
def check_equivalence(lowercase : Optional[int] , lowercase : str , lowercase : str , lowercase : Tuple={} ):
with torch.no_grad():
__lowercase = model(**lowercase , return_dict=lowercase , **lowercase )
__lowercase = model(**lowercase , return_dict=lowercase , **lowercase ).to_tuple()
def recursive_check(lowercase : int , lowercase : Optional[Any] ):
if isinstance(lowercase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowercase , lowercase ):
recursive_check(lowercase , lowercase )
elif isinstance(lowercase , lowercase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(lowercase , lowercase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(lowercase ) , set_nan_tensor_to_zero(lowercase ) , atol=1E-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
F" {torch.isnan(lowercase ).any()} and `inf`: {torch.isinf(lowercase )}. Dict has"
F" `nan`: {torch.isnan(lowercase ).any()} and `inf`: {torch.isinf(lowercase )}."
) , )
recursive_check(lowercase , lowercase )
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
model.to(lowercase )
model.eval()
__lowercase = self._prepare_for_class(lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase , {"""output_hidden_states""": True} )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase , {"""output_hidden_states""": True} )
@require_torch
class _lowerCAmelCase ( unittest.TestCase , _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : List[str] = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowercase__ : Any = MaskFormerSwinConfig
def snake_case__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = MaskFormerSwinModelTester(self )
def snake_case__ ( self : Any ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
__lowercase = backbone_class(lowercase )
backbone.to(lowercase )
backbone.eval()
__lowercase = backbone(**lowercase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , lowercase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
__lowercase = backbone(**lowercase , output_hidden_states=lowercase )
self.assertIsNotNone(outputs.hidden_states )
self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__lowercase , __lowercase , __lowercase = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
__lowercase = backbone(**lowercase , output_attentions=lowercase )
self.assertIsNotNone(outputs.attentions )
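For orientation, a minimal usage sketch of the backbone this test suite exercises. It assumes MaskFormerSwinBackbone and MaskFormerSwinConfig are importable from transformers (they are named in the test attributes above), that torch is installed, and that the config accepts an out_features argument; shapes depend on the config defaults.

import torch
from transformers import MaskFormerSwinBackbone, MaskFormerSwinConfig

config = MaskFormerSwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
backbone = MaskFormerSwinBackbone(config).eval()
with torch.no_grad():
    outputs = backbone(torch.rand(1, 3, 224, 224))
# One feature map per requested stage, shaped (batch_size, n_channels, height, width).
for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
    print(tuple(feature_map.shape), n_channels)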
| 634 | 0 |
import argparse
from collections import defaultdict
import yaml
UpperCamelCase__ = "docs/source/en/_toctree.yml"
def clean_model_doc_toc( lowercase__ ) -> str:
__lowercase = defaultdict(int )
for doc in model_doc:
counts[doc["local"]] += 1
__lowercase = [key for key, value in counts.items() if value > 1]
__lowercase = []
for duplicate_key in duplicates:
__lowercase = list({doc["""title"""] for doc in model_doc if doc["""local"""] == duplicate_key} )
if len(lowercase__ ) > 1:
raise ValueError(
F"{duplicate_key} is present several times in the documentation table of content at "
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""" )
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
# Add non-duplicated keys
new_doc.extend([doc for doc in model_doc if counts[doc["""local"""]] == 1] )
# Sort
return sorted(lowercase__ , key=lambda s : s["title"].lower() )
def check_model_doc( lowercase__=False ) -> Any:
with open(lowercase__ , encoding="""utf-8""" ) as f:
__lowercase = yaml.safe_load(f.read() )
# Get to the API doc
__lowercase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
__lowercase = content[api_idx]["""sections"""]
# Then to the model doc
__lowercase = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
__lowercase = api_doc[model_idx]["""sections"""]
__lowercase = [(idx, section) for idx, section in enumerate(lowercase__ ) if """sections""" in section]
__lowercase = False
for idx, modality_doc in modalities_docs:
__lowercase = modality_doc["""sections"""]
__lowercase = clean_model_doc_toc(lowercase__ )
if old_modality_doc != new_modality_doc:
__lowercase = True
if overwrite:
__lowercase = new_modality_doc
if diff:
if overwrite:
__lowercase = model_doc
__lowercase = api_doc
with open(lowercase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(lowercase__ , allow_unicode=lowercase__ ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
UpperCamelCase__ = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
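For clarity, a self-contained sketch of the dedupe-and-sort step that clean_model_doc_toc performs, run on toy entries. The function and data below are illustrative, not the real _toctree.yml, and the real function additionally raises when duplicated keys carry different titles.

def dedupe_and_sort(model_doc):
    # Keep the first entry for each "local" target, then sort alphabetically by title.
    new_doc, seen = [], set()
    for doc in model_doc:
        if doc["local"] not in seen:
            new_doc.append(doc)
            seen.add(doc["local"])
    return sorted(new_doc, key=lambda s: s["title"].lower())

toy = [
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/albert", "title": "ALBERT"},
    {"local": "model_doc/bert", "title": "BERT"},
]
print(dedupe_and_sort(toy))  # ALBERT first, a single BERT entry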
| 719 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
UpperCamelCase__ = "scheduler_config.json"
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : List[Any] = 1
lowercase__ : Tuple = 2
lowercase__ : Union[str, Any] = 3
lowercase__ : Union[str, Any] = 4
lowercase__ : str = 5
lowercase__ : Any = 6
lowercase__ : Any = 7
lowercase__ : List[str] = 8
lowercase__ : Union[str, Any] = 9
lowercase__ : int = 10
lowercase__ : List[str] = 11
lowercase__ : List[Any] = 12
lowercase__ : str = 13
lowercase__ : Optional[int] = 14
@dataclass
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : torch.FloatTensor
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : Optional[int] = SCHEDULER_CONFIG_NAME
lowercase__ : int = []
lowercase__ : Dict = True
@classmethod
def snake_case__ ( cls : str , lowercase : Dict[str, Any] = None , lowercase : Optional[str] = None , lowercase : Any=False , **lowercase : List[str] , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase , __lowercase , __lowercase = cls.load_config(
pretrained_model_name_or_path=lowercase , subfolder=lowercase , return_unused_kwargs=lowercase , return_commit_hash=lowercase , **lowercase , )
return cls.from_config(lowercase , return_unused_kwargs=lowercase , **lowercase )
def snake_case__ ( self : Dict , lowercase : Union[str, os.PathLike] , lowercase : bool = False , **lowercase : List[str] ) -> Optional[Any]:
"""simple docstring"""
self.save_config(save_directory=lowercase , push_to_hub=lowercase , **lowercase )
@property
def snake_case__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return self._get_compatibles()
@classmethod
def _get_compatibles( cls : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = list(set([cls.__name__] + cls._compatibles ) )
__lowercase = importlib.import_module(__name__.split(""".""" )[0] )
__lowercase = [
getattr(lowercase , lowercase ) for c in compatible_classes_str if hasattr(lowercase , lowercase )
]
return compatible_classes
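This mixin is the scheduler base class from the diffusers library. A short sketch of how the compatibles machinery above is typically consumed; DDPMScheduler and the compatibles property are taken from that library and should be treated as assumptions on top of the code shown here.

from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1_000)
# Compatible schedulers share a config format, so one can be swapped in via from_config.
print([cls.__name__ for cls in scheduler.compatibles])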
| 634 | 0 |
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> bool:
__lowercase = len(lowercase__ )
__lowercase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
__lowercase = True
# a non-zero sum cannot be formed from an empty set, hence False
for i in range(1 , required_sum + 1 ):
__lowercase = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
__lowercase = subset[i - 1][j]
if arr[i - 1] <= j:
__lowercase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
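A compact, readable restatement of the subset-sum dynamic programme above, with a toy check; the function and variable names below are illustrative.

def has_subset_with_sum(arr, required_sum):
    # subset[i][j] is True when some subset of the first i values sums to j.
    n = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(n + 1)]
    for i in range(n + 1):
        subset[i][0] = True  # the empty subset always sums to zero
    for i in range(1, n + 1):
        for j in range(1, required_sum + 1):
            subset[i][j] = subset[i - 1][j] or (arr[i - 1] <= j and subset[i - 1][j - arr[i - 1]])
    return subset[n][required_sum]

assert has_subset_with_sum([3, 34, 4, 12, 5, 2], 9)       # 4 + 5
assert not has_subset_with_sum([3, 34, 4, 12, 5, 2], 30)  # no subset reaches 30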
| 720 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Union[str, Any] = """sew"""
def __init__( self : List[Any] , lowercase : int=32 , lowercase : List[str]=768 , lowercase : Dict=12 , lowercase : str=12 , lowercase : str=3_072 , lowercase : Optional[int]=2 , lowercase : List[str]="gelu" , lowercase : List[str]=0.1 , lowercase : Tuple=0.1 , lowercase : Dict=0.1 , lowercase : Any=0.0 , lowercase : Dict=0.1 , lowercase : Optional[int]=0.1 , lowercase : List[str]=0.02 , lowercase : Dict=1E-5 , lowercase : Tuple="group" , lowercase : int="gelu" , lowercase : Any=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowercase : Optional[int]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase : Any=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase : List[str]=False , lowercase : Tuple=128 , lowercase : int=16 , lowercase : Union[str, Any]=True , lowercase : List[str]=0.05 , lowercase : Optional[int]=10 , lowercase : Any=2 , lowercase : Optional[Any]=0.0 , lowercase : Optional[Any]=10 , lowercase : int=0 , lowercase : Optional[int]="mean" , lowercase : List[Any]=False , lowercase : str=False , lowercase : int=256 , lowercase : str=0 , lowercase : List[Any]=1 , lowercase : List[Any]=2 , **lowercase : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase )
__lowercase = hidden_size
__lowercase = feat_extract_norm
__lowercase = feat_extract_activation
__lowercase = list(lowercase )
__lowercase = list(lowercase )
__lowercase = list(lowercase )
__lowercase = conv_bias
__lowercase = num_conv_pos_embeddings
__lowercase = num_conv_pos_embedding_groups
__lowercase = len(self.conv_dim )
__lowercase = num_hidden_layers
__lowercase = intermediate_size
__lowercase = squeeze_factor
__lowercase = hidden_act
__lowercase = num_attention_heads
__lowercase = hidden_dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = feat_proj_dropout
__lowercase = final_dropout
__lowercase = layerdrop
__lowercase = layer_norm_eps
__lowercase = initializer_range
__lowercase = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
F"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"
F"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowercase = apply_spec_augment
__lowercase = mask_time_prob
__lowercase = mask_time_length
__lowercase = mask_time_min_masks
__lowercase = mask_feature_prob
__lowercase = mask_feature_length
__lowercase = mask_feature_min_masks
# ctc loss
__lowercase = ctc_loss_reduction
__lowercase = ctc_zero_infinity
# sequence classification
__lowercase = use_weighted_layer_sum
__lowercase = classifier_proj_size
@property
def snake_case__ ( self : Dict ) -> str:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
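The closing property is the ratio between raw input samples and output frames (the product of the convolutional strides). A short sketch, assuming the upstream class name SEWConfig and property name inputs_to_logits_ratio exported by transformers:

from transformers import SEWConfig

config = SEWConfig()  # default conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
print(config.inputs_to_logits_ratio)  # 5 * 2**6 == 320 input samples per output frame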
| 634 | 0 |
import qiskit
def single_qubit_measure( qubits: int , classical_bits: int ) -> qiskit.result.counts.Counts:
simulator = qiskit.Aer.get_backend("""aer_simulator""" )
# Create a Quantum Circuit acting on the q register
circuit = qiskit.QuantumCircuit(qubits , classical_bits )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
job = qiskit.execute(circuit , simulator , shots=1_000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(circuit )
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 721 |
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class _lowerCAmelCase ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : int = WavaVecaPhonemeCTCTokenizer
lowercase__ : Optional[int] = False
def snake_case__ ( self : str ) -> int:
"""simple docstring"""
super().setUp()
__lowercase = (
"""<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː """
"""ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː """
"""ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 """
"""oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ """
"""pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ """
"""yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ """
"""əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ """
"""ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ """
"""ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ """
"""uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ """
"""ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ """
"""ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ """
"""ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"""
).split(""" """ )
__lowercase = dict(zip(lowercase , range(len(lowercase ) ) ) )
__lowercase = {"""pad_token""": """<pad>""", """unk_token""": """<unk>""", """bos_token""": """<s>""", """eos_token""": """</s>"""}
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowercase ) + """\n""" )
def snake_case__ ( self : List[Any] , lowercase : Optional[Any] , lowercase : List[str]=False , lowercase : List[str]=20 , lowercase : str=5 ) -> Tuple[str, list]:
"""simple docstring"""
__lowercase = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase )) for i in range(len(lowercase ) )]
__lowercase = list(filter(lambda lowercase : [t[0]] == tokenizer.encode(t[1] , do_phonemize=lowercase ) , lowercase ) )
if max_length is not None and len(lowercase ) > max_length:
__lowercase = toks[:max_length]
if min_length is not None and len(lowercase ) < min_length and len(lowercase ) > 0:
while len(lowercase ) < min_length:
__lowercase = toks + toks
# toks_str = [t[1] for t in toks]
__lowercase = [t[0] for t in toks]
# Ensure consistency
__lowercase = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase )
if " " not in output_txt and len(lowercase ) > 1:
__lowercase = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase )
)
if with_prefix_space:
__lowercase = """ """ + output_txt
__lowercase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
return output_txt, output_ids
def snake_case__ ( self : Tuple , **lowercase : int ) -> Union[str, Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **lowercase )
def snake_case__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
# check adding a single token
tokenizer.add_tokens("""xxx""" )
__lowercase = tokenizer("""m xxx ɪ""" , do_phonemize=lowercase ).input_ids
self.assertEqual(lowercase , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(["""aaa""", """bbb""", """ccc"""] )
__lowercase = tokenizer("""m aaa ɪ ccc""" , do_phonemize=lowercase ).input_ids
self.assertEqual(lowercase , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
__lowercase = tokenizer("""maɪ c""" , do_phonemize=lowercase ).input_ids
self.assertEqual(lowercase , [3, 200] ) # mai should be <unk> (=3)
def snake_case__ ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
self.assertEqual(lowercase , """h ə l oʊ h aʊ ɑːɹ j uː""" )
def snake_case__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(lowercase ).input_ids , tokenizer(lowercase , do_phonemize=lowercase ).input_ids )
def snake_case__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
__lowercase = tokenizer.decode(tokenizer(lowercase ).input_ids )
self.assertEqual(lowercase , lowercase )
def snake_case__ ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
__lowercase = tokenizer.decode(sample_ids[0] )
__lowercase = tokenizer.batch_decode(lowercase )
self.assertEqual(lowercase , batch_tokens[0] )
self.assertEqual(lowercase , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
def snake_case__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
self.assertEqual(lowercase , """h ə l oʊ | h aʊ | ɑːɹ | j uː |""" )
def snake_case__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(lowercase ).input_ids , tokenizer(lowercase , do_phonemize=lowercase ).input_ids )
def snake_case__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
__lowercase = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
__lowercase = tokenizer.decode(sample_ids[0] )
__lowercase = tokenizer.batch_decode(lowercase )
self.assertEqual(lowercase , batch_tokens[0] )
self.assertEqual(lowercase , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
# decode with no word_del_token filter
__lowercase = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=lowercase )
__lowercase = tokenizer.batch_decode(lowercase , filter_word_delimiter_token=lowercase )
self.assertEqual(lowercase , batch_tokens[0] )
self.assertEqual(lowercase , ["""k s ɾ | ɾ l | ɭʲ""", """| j ð | s j ð s oːɹ"""] )
def snake_case__ ( self : int ) -> str:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
__lowercase = tokenizer.decode(tokenizer(lowercase ).input_ids , filter_word_delimiter_token=lowercase )
self.assertEqual(lowercase , lowercase )
def snake_case__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
__lowercase = tokenizer.decode(tokenizer(lowercase ).input_ids , filter_word_delimiter_token=lowercase )
self.assertEqual(""" """.join([p.strip() for p in phonemes.split(""" |""" )] ).strip() , lowercase )
def snake_case__ ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token=lowercase )
__lowercase = """Hello how are you"""
__lowercase = tokenizer(lowercase , phonemizer_lang="""en-us""" ).input_ids
__lowercase = tokenizer(lowercase , phonemizer_lang="""fr-fr""" ).input_ids
self.assertNotEqual(lowercase , lowercase )
__lowercase = tokenizer.decode(lowercase )
__lowercase = tokenizer.decode(lowercase )
self.assertEqual(lowercase , """h ə l oʊ h aʊ ɑːɹ j uː""" )
self.assertEqual(lowercase , """ɛ l o h aʊ a ʁ j u""" )
def snake_case__ ( self : int ) -> int:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = """Hello how Are you"""
__lowercase = """hello how are you"""
__lowercase = tokenizer(lowercase ).input_ids
__lowercase = tokenizer(lowercase ).input_ids
self.assertEqual(lowercase , lowercase )
def snake_case__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
tokenizer.add_tokens(["""!""", """?"""] )
tokenizer.add_special_tokens({"""cls_token""": """$$$"""} )
# fmt: off
__lowercase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
__lowercase = tokenizer.batch_decode(lowercase )
self.assertEqual(lowercase , ["""k s ɾ ɾ l ɭʲ!?!? $$$""", """j ð s j ð s oːɹ $$$"""] )
@staticmethod
def get_from_offsets( offsets , key ) -> List[Any]:
"""simple docstring"""
retrieved_list = [d[key] for d in offsets]
return retrieved_list
def snake_case__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.get_tokenizer(word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
__lowercase = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
__lowercase = tokenizer.decode(lowercase , output_char_offsets=lowercase , filter_word_delimiter_token=lowercase )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""char_offsets""" in outputs )
self.assertTrue(isinstance(lowercase , lowercase ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) , ["""k""", """s""", """ɾ""", """ɾ""", """|""", """ɾ""", """l""", """|""", """ɭʲ"""] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """start_offset""" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """end_offset""" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def snake_case__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase = self.get_tokenizer(word_delimiter_token="""|""" )
def check_list_tuples_equal(lowercase : List[str] , lowercase : Dict ):
self.assertTrue(isinstance(lowercase , lowercase ) )
self.assertTrue(isinstance(outputs_list[0] , lowercase ) )
# transform list to ModelOutput
__lowercase = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch["""text"""] , outputs_batch_a["""text"""] )
def recursive_check(lowercase : List[Any] , lowercase : Optional[int] ):
if isinstance(lowercase , lowercase ):
[recursive_check(lowercase , lowercase ) for la, la in zip(lowercase , lowercase )]
self.assertEqual(lowercase , lowercase )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["""char_offsets"""] , outputs_batch_a["""char_offsets"""] )
# fmt: off
__lowercase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
__lowercase = tokenizer.batch_decode(lowercase , output_char_offsets=lowercase )
__lowercase = [tokenizer.decode(lowercase , output_char_offsets=lowercase ) for ids in sample_ids]
check_list_tuples_equal(lowercase , lowercase )
@unittest.skip("""Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes""" )
def snake_case__ ( self : List[Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Wav2Vec2PhonemeTokenizer always puts spaces between phonemes""" )
def snake_case__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip("""encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency""" )
def snake_case__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Wav2Vec2PhonemeModel has no max model length => no testing""" )
def snake_case__ ( self : str ) -> int:
"""simple docstring"""
pass
def snake_case__ ( self : Any ) -> int:
"""simple docstring"""
__lowercase = self.get_tokenizers(do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
__lowercase = tokenizer.vocab_size
__lowercase = len(lowercase )
self.assertNotEqual(lowercase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__lowercase = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
__lowercase = tokenizer.add_tokens(lowercase )
__lowercase = tokenizer.vocab_size
__lowercase = len(lowercase )
self.assertNotEqual(lowercase , 0 )
self.assertEqual(lowercase , lowercase )
self.assertEqual(lowercase , len(lowercase ) )
self.assertEqual(lowercase , all_size + len(lowercase ) )
__lowercase = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=lowercase )
self.assertGreaterEqual(len(lowercase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
__lowercase = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
__lowercase = tokenizer.add_special_tokens(lowercase )
__lowercase = tokenizer.vocab_size
__lowercase = len(lowercase )
self.assertNotEqual(lowercase , 0 )
self.assertEqual(lowercase , lowercase )
self.assertEqual(lowercase , len(lowercase ) )
self.assertEqual(lowercase , all_size_a + len(lowercase ) )
__lowercase = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=lowercase )
self.assertGreaterEqual(len(lowercase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def snake_case__ ( self : str ) -> int:
"""simple docstring"""
pass
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def snake_case__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
def snake_case__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.get_tokenizers(fast=lowercase , do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
__lowercase = ["""ð""", """ɪ""", """s""", """ɪ""", """z""", """ɐ""", """t""", """ɛ""", """k""", """s""", """t"""]
__lowercase = tokenizer.convert_tokens_to_string(lowercase )
self.assertIsInstance(output["""text"""] , lowercase )
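A minimal end-to-end sketch of the behaviour these tests exercise. It assumes the upstream class name Wav2Vec2PhonemeCTCTokenizer and an installed espeak phonemizer backend; the expected phoneme string mirrors the assertions earlier in the file.

from transformers import Wav2Vec2PhonemeCTCTokenizer

tokenizer = Wav2Vec2PhonemeCTCTokenizer.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
phonemes = tokenizer.phonemize("Hello how are you", phonemizer_lang="en-us")
print(phonemes)  # 'h ə l oʊ h aʊ ɑːɹ j uː'
# Encoding and then decoding round-trips back to the phoneme string.
print(tokenizer.decode(tokenizer("Hello how are you").input_ids))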
| 634 | 0 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : int = """encodec"""
def __init__( self : str , lowercase : str=[1.5, 3.0, 6.0, 12.0, 24.0] , lowercase : int=24_000 , lowercase : str=1 , lowercase : Optional[Any]=False , lowercase : Tuple=None , lowercase : List[str]=None , lowercase : Any=128 , lowercase : Tuple=32 , lowercase : Optional[Any]=1 , lowercase : Tuple=[8, 5, 4, 2] , lowercase : str="weight_norm" , lowercase : Union[str, Any]=7 , lowercase : Any=7 , lowercase : List[Any]=3 , lowercase : Optional[Any]=2 , lowercase : Dict=True , lowercase : Union[str, Any]="reflect" , lowercase : str=2 , lowercase : Union[str, Any]=2 , lowercase : Optional[Any]=1.0 , lowercase : Optional[int]=1_024 , lowercase : Tuple=None , lowercase : List[Any]=True , **lowercase : str , ) -> Optional[int]:
"""simple docstring"""
__lowercase = target_bandwidths
__lowercase = sampling_rate
__lowercase = audio_channels
__lowercase = normalize
__lowercase = chunk_length_s
__lowercase = overlap
__lowercase = hidden_size
__lowercase = num_filters
__lowercase = num_residual_layers
__lowercase = upsampling_ratios
__lowercase = norm_type
__lowercase = kernel_size
__lowercase = last_kernel_size
__lowercase = residual_kernel_size
__lowercase = dilation_growth_rate
__lowercase = use_causal_conv
__lowercase = pad_mode
__lowercase = compress
__lowercase = num_lstm_layers
__lowercase = trim_right_ratio
__lowercase = codebook_size
__lowercase = codebook_dim if codebook_dim is not None else hidden_size
__lowercase = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}" )
super().__init__(**lowercase )
@property
def chunk_length( self : Tuple ) -> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def snake_case__ ( self : int ) -> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def frame_rate( self : List[str] ) -> int:
"""simple docstring"""
__lowercase = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def snake_case__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
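A small sketch of the derived properties defined above; the class and property names (EncodecConfig, chunk_length, chunk_stride, frame_rate) follow the upstream implementation and should be treated as assumptions here. The arithmetic follows directly from the definitions.

from transformers import EncodecConfig

config = EncodecConfig(sampling_rate=24_000, chunk_length_s=1.0, overlap=0.01)
print(config.chunk_length)  # int(1.0 * 24_000) == 24_000 samples per chunk
print(config.chunk_stride)  # max(1, int((1 - 0.01) * 24_000)) == 23_760 samples
print(config.frame_rate)    # ceil(24_000 / prod((8, 5, 4, 2))) == ceil(24_000 / 320) == 75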
| 700 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
# TODO Update this
UpperCamelCase__ = {
"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Union[str, Any] = """esm"""
def __init__( self : Any , lowercase : Optional[Any]=None , lowercase : Optional[int]=None , lowercase : List[Any]=None , lowercase : Optional[int]=768 , lowercase : str=12 , lowercase : Union[str, Any]=12 , lowercase : Dict=3_072 , lowercase : Optional[int]=0.1 , lowercase : str=0.1 , lowercase : Dict=1_026 , lowercase : Tuple=0.02 , lowercase : str=1E-1_2 , lowercase : Dict="absolute" , lowercase : Optional[Any]=True , lowercase : int=None , lowercase : int=False , lowercase : List[str]=False , lowercase : Tuple=None , lowercase : Tuple=None , **lowercase : Union[str, Any] , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=lowercase , mask_token_id=lowercase , **lowercase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
__lowercase = use_cache
__lowercase = emb_layer_norm_before
__lowercase = token_dropout
__lowercase = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
__lowercase = EsmFoldConfig()
elif isinstance(lowercase , lowercase ):
__lowercase = EsmFoldConfig(**lowercase )
__lowercase = esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
__lowercase = get_default_vocab_list()
else:
__lowercase = vocab_list
else:
__lowercase = None
__lowercase = None
if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , lowercase ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def snake_case__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = super().to_dict()
if isinstance(self.esmfold_config , lowercase ):
__lowercase = self.esmfold_config.to_dict()
return output
@dataclass
class EsmFoldConfig:
"""simple docstring"""
lowercase__ : str = None
lowercase__ : bool = True
lowercase__ : bool = False
lowercase__ : bool = False
lowercase__ : bool = False
lowercase__ : float = 0
lowercase__ : bool = True
lowercase__ : bool = False
lowercase__ : int = 128
lowercase__ : "TrunkConfig" = None
def snake_case__ ( self : List[str] ) -> Any:
"""simple docstring"""
if self.trunk is None:
__lowercase = TrunkConfig()
elif isinstance(self.trunk , lowercase ):
__lowercase = TrunkConfig(**self.trunk )
def snake_case__ ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase = asdict(self )
__lowercase = self.trunk.to_dict()
return output
@dataclass
class TrunkConfig:
"""simple docstring"""
lowercase__ : int = 48
lowercase__ : int = 1_024
lowercase__ : int = 128
lowercase__ : int = 32
lowercase__ : int = 32
lowercase__ : int = 32
lowercase__ : float = 0
lowercase__ : float = 0
lowercase__ : bool = False
lowercase__ : int = 4
lowercase__ : Optional[int] = 128
lowercase__ : "StructureModuleConfig" = None
def snake_case__ ( self : Tuple ) -> str:
"""simple docstring"""
if self.structure_module is None:
__lowercase = StructureModuleConfig()
elif isinstance(self.structure_module , lowercase ):
__lowercase = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"`max_recycles` should be positive, got {self.max_recycles}." )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
"""`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
F" {self.sequence_state_dim} and {self.sequence_head_width}." )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
"""`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
F" {self.pairwise_state_dim} and {self.pairwise_head_width}." )
__lowercase = self.sequence_state_dim // self.sequence_head_width
__lowercase = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
F" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
F" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
if self.dropout >= 0.4:
raise ValueError(F"`dropout` should not be greater than 0.4, got {self.dropout}." )
def snake_case__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = asdict(self )
__lowercase = self.structure_module.to_dict()
return output
@dataclass
class StructureModuleConfig:
"""simple docstring"""
lowercase__ : int = 384
lowercase__ : int = 128
lowercase__ : int = 16
lowercase__ : int = 128
lowercase__ : int = 12
lowercase__ : int = 4
lowercase__ : int = 8
lowercase__ : float = 0.1
lowercase__ : int = 8
lowercase__ : int = 1
lowercase__ : int = 2
lowercase__ : int = 7
lowercase__ : int = 10
lowercase__ : float = 1E-8
lowercase__ : float = 1E5
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
return asdict(self )
def get_default_vocab_list( ) -> List[Any]:
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
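To make the head-width constraint in TrunkConfig concrete, a short sketch; the import path follows the upstream ESM implementation and is an assumption here.

from transformers.models.esm.configuration_esm import TrunkConfig

trunk = TrunkConfig(sequence_state_dim=1_024, sequence_head_width=32)
print(trunk.sequence_state_dim // trunk.sequence_head_width)  # 32 sequence attention heads

try:
    TrunkConfig(sequence_state_dim=1_000, sequence_head_width=32)  # 1000 is not a multiple of 32
except ValueError as err:
    print(err)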
| 634 | 0 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
def snake_case__ ( self : int ) -> int:
"""simple docstring"""
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def snake_case__ ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(lowercase )
def snake_case__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self._create_example_records()
__lowercase = Dataset.from_list(lowercase )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(lowercase ):
self.assertDictEqual(lowercase , example_records[i] )
def snake_case__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self._create_example_records()
__lowercase = Dataset.from_list(lowercase )
__lowercase = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def snake_case__ ( self : Tuple ) -> str: # checks what happens with missing columns
"""simple docstring"""
__lowercase = [{"""col_1""": 1}, {"""col_2""": """x"""}]
__lowercase = Dataset.from_list(lowercase )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def snake_case__ ( self : int ) -> List[str]: # checks if the type can be inferred from the second record
"""simple docstring"""
__lowercase = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
__lowercase = Dataset.from_list(lowercase )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def snake_case__ ( self : Tuple ) -> Dict:
"""simple docstring"""
__lowercase = Dataset.from_list([] )
self.assertEqual(len(lowercase ) , 0 )
self.assertListEqual(dset.column_names , [] )
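A quick interactive sketch of the missing-column behaviour checked above (requires the datasets package): the first record fixes the column set and later records are padded with None.

from datasets import Dataset

dset = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
print(dset.column_names)  # ['col_1'] - taken from the first record
print(dset[1])            # {'col_1': None}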
| 701 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : Tuple = LxmertTokenizer
lowercase__ : List[str] = LxmertTokenizerFast
lowercase__ : Optional[Any] = True
lowercase__ : List[Any] = True
def snake_case__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
super().setUp()
__lowercase = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def snake_case__ ( self : Optional[int] , lowercase : int ) -> List[Any]:
"""simple docstring"""
__lowercase = """UNwant\u00E9d,running"""
__lowercase = """unwanted, running"""
return input_text, output_text
def snake_case__ ( self : str ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class(self.vocab_file )
__lowercase = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(lowercase , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , [7, 4, 5, 10, 8, 9] )
def snake_case__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__lowercase = self.get_tokenizer()
__lowercase = self.get_rust_tokenizer()
__lowercase = """I was born in 92000, and this is falsé."""
__lowercase = tokenizer.tokenize(lowercase )
__lowercase = rust_tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
__lowercase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
__lowercase = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
__lowercase = self.get_rust_tokenizer()
__lowercase = tokenizer.encode(lowercase )
__lowercase = rust_tokenizer.encode(lowercase )
self.assertListEqual(lowercase , lowercase )
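A short usage sketch with a full pretrained vocabulary rather than the toy fixture above; the checkpoint name unc-nlp/lxmert-base-uncased is an assumed, commonly used LXMERT checkpoint, and downloading it needs network access.

from transformers import LxmertTokenizer

tokenizer = LxmertTokenizer.from_pretrained("unc-nlp/lxmert-base-uncased")
tokens = tokenizer.tokenize("UNwant\u00e9d,running")
print(tokens)  # WordPiece pieces, e.g. ['unwanted', ',', 'running'] with the full vocab
print(tokenizer.convert_tokens_to_ids(tokens))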
| 634 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
UpperCamelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name( lowercase__ ):
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
__lowercase = model_type_to_module_name(lowercase__ )
__lowercase = importlib.import_module(F".{module_name}" , """transformers.models""" )
try:
return getattr(lowercase__ , lowercase__ )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(lowercase__ , """__name__""" , lowercase__ ) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
__lowercase = importlib.import_module("""transformers""" )
if hasattr(lowercase__ , lowercase__ ):
return getattr(lowercase__ , lowercase__ )
return None
def UpperCAmelCase__ ( lowercase__ , lowercase__ = None , lowercase__ = False , lowercase__ = False , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = False , **lowercase__ , ):
__lowercase = get_file_from_repo(
lowercase__ , lowercase__ , cache_dir=lowercase__ , force_download=lowercase__ , resume_download=lowercase__ , proxies=lowercase__ , use_auth_token=lowercase__ , revision=lowercase__ , local_files_only=lowercase__ , )
if resolved_config_file is None:
logger.info(
"""Could not locate the feature extractor configuration file, will try to use the model config instead.""" )
return {}
with open(lowercase__ , encoding="""utf-8""" ) as reader:
return json.load(lowercase__ )
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
raise EnvironmentError(
"""AutoFeatureExtractor is designed to be instantiated """
"""using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" )
@classmethod
@replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES )
def snake_case__ ( cls : str , lowercase : str , **lowercase : int ) -> Optional[int]:
"""simple docstring"""
__lowercase = kwargs.pop("""config""" , lowercase )
__lowercase = kwargs.pop("""trust_remote_code""" , lowercase )
__lowercase = True
__lowercase , __lowercase = FeatureExtractionMixin.get_feature_extractor_dict(lowercase , **lowercase )
__lowercase = config_dict.get("""feature_extractor_type""" , lowercase )
__lowercase = None
if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ):
__lowercase = config_dict["""auto_map"""]["""AutoFeatureExtractor"""]
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(lowercase , lowercase ):
__lowercase = AutoConfig.from_pretrained(lowercase , **lowercase )
# It could be in `config.feature_extractor_type``
__lowercase = getattr(lowercase , """feature_extractor_type""" , lowercase )
if hasattr(lowercase , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map:
__lowercase = config.auto_map["""AutoFeatureExtractor"""]
if feature_extractor_class is not None:
__lowercase = feature_extractor_class_from_name(lowercase )
__lowercase = feature_extractor_auto_map is not None
__lowercase = feature_extractor_class is not None or type(lowercase ) in FEATURE_EXTRACTOR_MAPPING
__lowercase = resolve_trust_remote_code(
lowercase , lowercase , lowercase , lowercase )
if has_remote_code and trust_remote_code:
__lowercase = get_class_from_dynamic_module(
lowercase , lowercase , **lowercase )
__lowercase = kwargs.pop("""code_revision""" , lowercase )
if os.path.isdir(lowercase ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(lowercase , **lowercase )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(lowercase , **lowercase )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(lowercase ) in FEATURE_EXTRACTOR_MAPPING:
__lowercase = FEATURE_EXTRACTOR_MAPPING[type(lowercase )]
return feature_extractor_class.from_dict(lowercase , **lowercase )
raise ValueError(
F"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
F"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
F"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}" )
@staticmethod
def snake_case__ ( lowercase : List[str] , lowercase : Any ) -> Optional[Any]:
"""simple docstring"""
FEATURE_EXTRACTOR_MAPPING.register(lowercase , lowercase )
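A typical call into the resolution logic above. The checkpoint name is one example of a repository whose preprocessor config names a feature extractor; downloading it needs network access.

from transformers import AutoFeatureExtractor

extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
print(type(extractor).__name__)  # Wav2Vec2FeatureExtractor, matching the wav2vec2 mapping entry above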
| 702 |
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> bool:
__lowercase = len(lowercase__ )
__lowercase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
__lowercase = True
# a non-zero sum cannot be formed from an empty set, hence False
for i in range(1 , required_sum + 1 ):
__lowercase = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
__lowercase = subset[i - 1][j]
if arr[i - 1] <= j:
__lowercase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 634 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase__ = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 703 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Union[str, Any] = """yolos"""
def __init__( self : Optional[int] , lowercase : Any=768 , lowercase : Tuple=12 , lowercase : Tuple=12 , lowercase : str=3_072 , lowercase : Optional[Any]="gelu" , lowercase : Union[str, Any]=0.0 , lowercase : Dict=0.0 , lowercase : Optional[int]=0.02 , lowercase : Optional[Any]=1E-1_2 , lowercase : Tuple=[512, 864] , lowercase : Optional[int]=16 , lowercase : Dict=3 , lowercase : Optional[Any]=True , lowercase : Optional[int]=100 , lowercase : Optional[int]=True , lowercase : Any=False , lowercase : Any=1 , lowercase : Any=5 , lowercase : List[str]=2 , lowercase : Union[str, Any]=5 , lowercase : str=2 , lowercase : Tuple=0.1 , **lowercase : str , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowercase )
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = qkv_bias
__lowercase = num_detection_tokens
__lowercase = use_mid_position_embeddings
__lowercase = auxiliary_loss
# Hungarian matcher
__lowercase = class_cost
__lowercase = bbox_cost
__lowercase = giou_cost
# Loss coefficients
__lowercase = bbox_loss_coefficient
__lowercase = giou_loss_coefficient
__lowercase = eos_coefficient
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Dict = version.parse("""1.11""" )
@property
def snake_case__ ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def snake_case__ ( self : int ) -> float:
"""simple docstring"""
return 1E-4
@property
def snake_case__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
return 12
| 634 | 0 |
from torch import nn
def UpperCAmelCase__ ( lowercase__ ) -> Tuple:
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F"Unsupported activation function: {act_fn}" )
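# Hedged usage sketch (editor addition): the helper above maps an activation-name string to
# a torch.nn module; the demo name below is illustrative, not part of the original sample.
def _activation_demo() -> None:
    # Should print SiLU(), Mish() and GELU() instances in turn; unknown names raise ValueError.
    for name in ("silu", "mish", "gelu"):
        print(name, UpperCAmelCase__(name))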
| 704 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowerCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : int = IFImgaImgSuperResolutionPipeline
lowercase__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""}
lowercase__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""} )
lowercase__ : Tuple = PipelineTesterMixin.required_optional_params - {"""latents"""}
def snake_case__ ( self : Tuple ) -> Any:
"""simple docstring"""
return self._get_superresolution_dummy_components()
def snake_case__ ( self : List[str] , lowercase : Optional[int] , lowercase : Optional[Any]=0 ) -> Union[str, Any]:
"""simple docstring"""
if str(lowercase ).startswith("""mps""" ):
__lowercase = torch.manual_seed(lowercase )
else:
__lowercase = torch.Generator(device=lowercase ).manual_seed(lowercase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase ) ).to(lowercase )
__lowercase = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowercase ) ).to(lowercase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def snake_case__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def snake_case__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def snake_case__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def snake_case__ ( self : Dict ) -> int:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def snake_case__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
self._test_save_load_local()
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 634 | 0 |
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ ) -> int:
if exponent == 1:
return base
if exponent % 2 == 0:
__lowercase = _modexpt(lowercase__ , exponent // 2 , lowercase__ ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(lowercase__ , exponent - 1 , lowercase__ )) % modulo_value
def UpperCAmelCase__ ( lowercase__ = 1_777 , lowercase__ = 1_855 , lowercase__ = 8 ) -> int:
__lowercase = base
for _ in range(1 , lowercase__ ):
__lowercase = _modexpt(lowercase__ , lowercase__ , 10**digits )
return result
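# Hedged sketch (editor addition): Python's three-argument pow() performs the same modular
# exponentiation as the recursive helper above, so the hyperexponentiation loop (Project
# Euler 188 style) can be cross-checked with this self-contained variant; all names here are
# illustrative, not part of the original sample.
def _hyperexp_last_digits(base: int = 1_777, height: int = 1_855, digits: int = 8) -> int:
    result = base
    for _ in range(1, height):
        # keep only the last `digits` digits at every level of the tower
        result = pow(base, result, 10**digits)
    return result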
if __name__ == "__main__":
print(F"""{solution() = }""")
| 705 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 634 | 0 |
import numpy
# List of input, output pairs
UpperCamelCase__ = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
UpperCamelCase__ = (((5_15, 22, 13), 5_55), ((61, 35, 49), 1_50))
UpperCamelCase__ = [2, 4, 1, 5]
UpperCamelCase__ = len(train_data)
UpperCamelCase__ = 0.009
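# Editor note (hedged): parameter_vector holds [theta_0, theta_1, theta_2, theta_3] and the
# intended hypothesis is the affine form h(x) = theta_0 + sum_i theta_{i+1} * x_i; batch
# gradient descent below updates each theta by LEARNING_RATE times the error averaged over
# the m training examples (scaled by the matching feature for the non-bias terms).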
def UpperCAmelCase__ ( lowercase__ , lowercase__="train" ) -> Optional[int]:
return calculate_hypothesis_value(lowercase__ , lowercase__ ) - output(
lowercase__ , lowercase__ )
def UpperCAmelCase__ ( lowercase__ ) -> Union[str, Any]:
__lowercase = 0
for i in range(len(lowercase__ ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> Optional[int]:
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> Dict:
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def UpperCAmelCase__ ( lowercase__ , lowercase__=m ) -> Union[str, Any]:
__lowercase = 0
for i in range(lowercase__ ):
if index == -1:
summation_value += _error(lowercase__ )
else:
summation_value += _error(lowercase__ ) * train_data[i][0][index]
return summation_value
def UpperCAmelCase__ ( lowercase__ ) -> Optional[Any]:
__lowercase = summation_of_cost_derivative(lowercase__ , lowercase__ ) / m
return cost_derivative_value
def UpperCAmelCase__ ( ) -> Tuple:
global parameter_vector
# Tune these values to set a tolerance value for predicted output
__lowercase = 0.000002
__lowercase = 0
__lowercase = 0
while True:
j += 1
__lowercase = [0, 0, 0, 0]
for i in range(0 , len(lowercase__ ) ):
__lowercase = get_cost_derivative(i - 1 )
__lowercase = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
lowercase__ , lowercase__ , atol=lowercase__ , rtol=lowercase__ , ):
break
__lowercase = temp_parameter_vector
print(("""Number of iterations:""", j) )
def UpperCAmelCase__ ( ) -> Any:
for i in range(len(lowercase__ ) ):
print(("""Actual output value:""", output(lowercase__ , """test""" )) )
print(("""Hypothesis output:""", calculate_hypothesis_value(lowercase__ , """test""" )) )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
| 706 |
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> int:
if len(lowercase__ ) != len(lowercase__ ):
raise ValueError("""String lengths must match!""" )
__lowercase = 0
for chara, chara in zip(lowercase__ , lowercase__ ):
if chara != chara:
count += 1
return count
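# Hedged usage sketch (editor addition): the counter above is the Hamming distance between
# two equal-length strings. Assuming the obfuscated `__lowercase` assignment resolves to the
# `count` variable read below it, a demo could be (the helper name is illustrative):
def _hamming_demo() -> None:
    # "karolin" and "kathrin" differ in 3 positions, so this should print 3.
    print(UpperCAmelCase__("karolin", "kathrin"))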
if __name__ == "__main__":
import doctest
doctest.testmod()
| 634 | 0 |
import math
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int] , lowercase : List[str]=0 ) -> str: # a graph with Node 0,1,...,N-1
"""simple docstring"""
__lowercase = n
__lowercase = [
[math.inf for j in range(0 , lowercase )] for i in range(0 , lowercase )
] # adjacency matrix for weight
__lowercase = [
[math.inf for j in range(0 , lowercase )] for i in range(0 , lowercase )
] # dp[i][j] stores minimum distance from i to j
def snake_case__ ( self : Dict , lowercase : Tuple , lowercase : Any , lowercase : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase = w
def snake_case__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
__lowercase = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def snake_case__ ( self : int , lowercase : str , lowercase : int ) -> Any:
"""simple docstring"""
return self.dp[u][v]
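# Editor note (hedged): floyd_warshall() above is the standard O(n^3) all-pairs relaxation
# dp[i][j] = min(dp[i][j], dp[i][k] + dp[k][j]) over every intermediate node k; after the demo
# below runs it, show_min(u, v) returns the shortest distance from u to v (math.inf if
# unreachable).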
if __name__ == "__main__":
UpperCamelCase__ = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 707 |
from __future__ import annotations
from collections.abc import Callable
UpperCamelCase__ = list[list[float | int]]
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> Matrix:
__lowercase = len(lowercase__ )
__lowercase = [[0 for _ in range(size + 1 )] for _ in range(lowercase__ )]
__lowercase = 42
__lowercase = 42
__lowercase = 42
__lowercase = 42
__lowercase = 42
__lowercase = 42
for row in range(lowercase__ ):
for col in range(lowercase__ ):
__lowercase = matrix[row][col]
__lowercase = vector[row][0]
__lowercase = 0
__lowercase = 0
while row < size and col < size:
# pivoting
__lowercase = max((abs(augmented[rowa][col] ), rowa) for rowa in range(lowercase__ , lowercase__ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
__lowercase , __lowercase = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , lowercase__ ):
__lowercase = augmented[rowa][col] / augmented[row][col]
__lowercase = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , lowercase__ ):
for row in range(lowercase__ ):
__lowercase = augmented[row][col] / augmented[col][col]
for cola in range(lowercase__ , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(lowercase__ )
]
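# Editor note (hedged): the routine above is Gaussian elimination with partial (row) pivoting
# followed by back substitution, returning the column vector x with matrix @ x = vector
# (rounded to 10 decimal places); interpolate() below uses it to fit the unique degree-(k-1)
# polynomial through the first k data points, as in Project Euler 101.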
def UpperCAmelCase__ ( lowercase__ ) -> Callable[[int], int]:
__lowercase = len(lowercase__ )
__lowercase = [[0 for _ in range(lowercase__ )] for _ in range(lowercase__ )]
__lowercase = [[0] for _ in range(lowercase__ )]
__lowercase = 42
__lowercase = 42
__lowercase = 42
__lowercase = 42
for x_val, y_val in enumerate(lowercase__ ):
for col in range(lowercase__ ):
__lowercase = (x_val + 1) ** (size - col - 1)
__lowercase = y_val
__lowercase = solve(lowercase__ , lowercase__ )
def interpolated_func(lowercase__ ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(lowercase__ ) )
return interpolated_func
def UpperCAmelCase__ ( lowercase__ ) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def UpperCAmelCase__ ( lowercase__ = question_function , lowercase__ = 10 ) -> int:
__lowercase = [func(lowercase__ ) for x_val in range(1 , order + 1 )]
__lowercase = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
__lowercase = 0
__lowercase = 42
__lowercase = 42
for poly in polynomials:
__lowercase = 1
while func(lowercase__ ) == poly(lowercase__ ):
x_val += 1
ret += poly(lowercase__ )
return ret
if __name__ == "__main__":
print(F"""{solution() = }""")
| 634 | 0 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[str] , lowercase : List[str] , lowercase : Optional[int]=13 , lowercase : List[Any]=7 , lowercase : int=True , lowercase : Any=True , lowercase : int=True , lowercase : Dict=True , lowercase : Any=99 , lowercase : List[Any]=32 , lowercase : Optional[int]=5 , lowercase : Union[str, Any]=4 , lowercase : Union[str, Any]=37 , lowercase : Dict="gelu" , lowercase : Tuple=0.1 , lowercase : Optional[int]=0.1 , lowercase : Union[str, Any]=512 , lowercase : List[Any]=16 , lowercase : Optional[int]=2 , lowercase : Optional[Any]=0.02 , lowercase : int=4 , ) -> Optional[Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_attention_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_choices
def snake_case__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_attention_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def snake_case__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def snake_case__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = True
__lowercase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class _lowerCAmelCase ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : int = True
lowercase__ : Optional[int] = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def snake_case__ ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = FlaxRobertaModelTester(self )
@slow
def snake_case__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
for model_class_name in self.all_model_classes:
__lowercase = model_class_name.from_pretrained("""roberta-base""" , from_pt=lowercase )
__lowercase = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowercase )
| 708 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase__ = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 634 | 0 |
def UpperCAmelCase__ ( lowercase__ ) -> int:
if not isinstance(lowercase__ , lowercase__ ):
raise TypeError("""Input value must be an 'int' type""" )
__lowercase = 0
while number:
position += 1
number >>= 1
return position
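# Hedged usage sketch (editor addition): the loop above returns the 1-based position of the
# highest set bit, i.e. the bit length of a non-negative int. Assuming the obfuscated
# `__lowercase` assignment resolves to the `position` counter read below it, a demo could be
# (the helper name is illustrative):
def _bit_length_demo() -> None:
    # 8 == 0b1000 needs 4 bits, so both values printed here should be 4.
    print(UpperCAmelCase__(8), (8).bit_length())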
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709 |
import unittest
import numpy as np
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , ) -> np.ndarray:
__lowercase = np.shape(lowercase__ )
__lowercase = np.shape(lowercase__ )
__lowercase = np.shape(lowercase__ )
if shape_a[0] != shape_b[0]:
__lowercase = (
"""Expected the same number of rows for A and B. """
F"Instead found A of size {shape_a} and B of size {shape_b}"
)
raise ValueError(lowercase__ )
if shape_b[1] != shape_c[1]:
__lowercase = (
"""Expected the same number of columns for B and C. """
F"Instead found B of size {shape_b} and C of size {shape_c}"
)
raise ValueError(lowercase__ )
__lowercase = pseudo_inv
if a_inv is None:
try:
__lowercase = np.linalg.inv(lowercase__ )
except np.linalg.LinAlgError:
raise ValueError(
"""Input matrix A is not invertible. Cannot compute Schur complement.""" )
return mat_c - mat_b.T @ a_inv @ mat_b
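# Editor note (hedged): the unit tests below exercise the block-determinant identity
# det([[A, B], [B^T, C]]) = det(A) * det(C - B^T A^{-1} B), i.e. the determinant of the full
# block matrix equals det(A) times the determinant of the Schur complement computed above.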
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : Dict ) -> None:
"""simple docstring"""
__lowercase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
__lowercase = np.array([[0, 3], [3, 0], [2, 3]] )
__lowercase = np.array([[2, 1], [6, 3]] )
__lowercase = schur_complement(lowercase , lowercase , lowercase )
__lowercase = np.block([[a, b], [b.T, c]] )
__lowercase = np.linalg.det(lowercase )
__lowercase = np.linalg.det(lowercase )
__lowercase = np.linalg.det(lowercase )
self.assertAlmostEqual(lowercase , det_a * det_s )
def snake_case__ ( self : Tuple ) -> None:
"""simple docstring"""
__lowercase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
__lowercase = np.array([[0, 3], [3, 0], [2, 3]] )
__lowercase = np.array([[2, 1], [6, 3]] )
with self.assertRaises(lowercase ):
schur_complement(lowercase , lowercase , lowercase )
def snake_case__ ( self : Tuple ) -> None:
"""simple docstring"""
__lowercase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
__lowercase = np.array([[0, 3], [3, 0], [2, 3]] )
__lowercase = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(lowercase ):
schur_complement(lowercase , lowercase , lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 634 | 0 |
def UpperCAmelCase__ ( lowercase__ ) -> bool:
return str(lowercase__ ) == str(lowercase__ )[::-1]
def UpperCAmelCase__ ( lowercase__ ) -> int:
return int(lowercase__ ) + int(str(lowercase__ )[::-1] )
def UpperCAmelCase__ ( lowercase__ = 10_000 ) -> int:
__lowercase = []
for num in range(1 , lowercase__ ):
__lowercase = 0
__lowercase = num
while iterations < 50:
__lowercase = sum_reverse(lowercase__ )
iterations += 1
if is_palindrome(lowercase__ ):
break
else:
lychrel_nums.append(lowercase__ )
return len(lowercase__ )
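# Hedged sketch (editor addition): one reverse-and-add step, written self-containedly so it
# does not depend on the obfuscated helper names; 47 becomes a palindrome after a single step,
# so it is not counted as a Lychrel candidate. The demo name below is illustrative.
def _lychrel_step_demo() -> None:
    n = 47 + int(str(47)[::-1])  # 47 + 74 = 121
    print(n, str(n) == str(n)[::-1])  # 121 True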
if __name__ == "__main__":
print(F"""{solution() = }""")
| 710 |
import random
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ = False ) -> dict:
__lowercase = {i: [] for i in range(lowercase__ )}
# if probability is greater than or equal to 1, generate a complete graph
if probability >= 1:
return complete_graph(lowercase__ )
# if probability is less than or equal to 0, return a graph without edges
if probability <= 0:
return graph
# for each pair of nodes (i, j), add an edge from i to j
# if the randomly generated number is lower than the given probability
for i in range(lowercase__ ):
for j in range(i + 1 , lowercase__ ):
if random.random() < probability:
graph[i].append(lowercase__ )
if not directed:
# if the graph is undirected, also add the reverse edge from j to i
graph[j].append(lowercase__ )
return graph
def UpperCAmelCase__ ( lowercase__ ) -> dict:
return {
i: [j for j in range(lowercase__ ) if i != j] for i in range(lowercase__ )
}
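# Hedged sketch (editor addition): a self-contained miniature of the same idea - include each
# possible undirected edge i-j with probability p - with the RNG seeded so the output is
# reproducible. All names below are illustrative, not part of the original sample.
def _random_graph_demo(n: int = 4, p: float = 0.5) -> None:
    random.seed(1)
    graph: dict[int, list[int]] = {i: [] for i in range(n)}
    for i in range(n):
        for j in range(i + 1, n):
            if random.random() < p:
                graph[i].append(j)
                graph[j].append(i)
    print(graph)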
if __name__ == "__main__":
import doctest
doctest.testmod()
| 634 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
"Salesforce/blip-vqa-capfit-large": (
"https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-base": (
"https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-large": (
"https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
),
"Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
"Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
"Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
"Salesforce/blip-itm-large-flikr": (
"https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
),
}
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Any = """blip_text_model"""
def __init__( self : Optional[Any] , lowercase : str=30_524 , lowercase : List[str]=768 , lowercase : int=768 , lowercase : int=3_072 , lowercase : List[Any]=768 , lowercase : Optional[int]=12 , lowercase : str=8 , lowercase : List[Any]=512 , lowercase : str="gelu" , lowercase : int=1E-1_2 , lowercase : str=0.0 , lowercase : Optional[Any]=0.0 , lowercase : Optional[int]=0.02 , lowercase : List[Any]=30_522 , lowercase : Dict=2 , lowercase : List[Any]=0 , lowercase : Union[str, Any]=102 , lowercase : Optional[Any]=True , lowercase : Optional[int]=True , **lowercase : str , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(
pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , sep_token_id=lowercase , **lowercase , )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = encoder_hidden_size
__lowercase = intermediate_size
__lowercase = projection_dim
__lowercase = hidden_dropout_prob
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = max_position_embeddings
__lowercase = layer_norm_eps
__lowercase = hidden_act
__lowercase = initializer_range
__lowercase = attention_probs_dropout_prob
__lowercase = is_decoder
__lowercase = use_cache
@classmethod
def snake_case__ ( cls : List[str] , lowercase : Union[str, os.PathLike] , **lowercase : List[Any] ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(lowercase )
__lowercase , __lowercase = cls.get_config_dict(lowercase , **lowercase )
# get the text config dict if we are loading from BlipConfig
if config_dict.get("""model_type""" ) == "blip":
__lowercase = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(lowercase , **lowercase )
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Tuple = """blip_vision_model"""
def __init__( self : int , lowercase : List[Any]=768 , lowercase : Optional[Any]=3_072 , lowercase : Optional[int]=512 , lowercase : int=12 , lowercase : Dict=12 , lowercase : List[Any]=384 , lowercase : List[Any]=16 , lowercase : Dict="gelu" , lowercase : Optional[Any]=1E-5 , lowercase : Dict=0.0 , lowercase : Optional[Any]=1E-1_0 , **lowercase : Any , ) -> Tuple:
"""simple docstring"""
super().__init__(**lowercase )
__lowercase = hidden_size
__lowercase = intermediate_size
__lowercase = projection_dim
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = patch_size
__lowercase = image_size
__lowercase = initializer_range
__lowercase = attention_dropout
__lowercase = layer_norm_eps
__lowercase = hidden_act
@classmethod
def snake_case__ ( cls : Any , lowercase : Union[str, os.PathLike] , **lowercase : str ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(lowercase )
__lowercase , __lowercase = cls.get_config_dict(lowercase , **lowercase )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get("""model_type""" ) == "blip":
__lowercase = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(lowercase , **lowercase )
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : int = """blip"""
lowercase__ : Tuple = True
def __init__( self : int , lowercase : List[str]=None , lowercase : Union[str, Any]=None , lowercase : Optional[Any]=512 , lowercase : Union[str, Any]=2.6592 , lowercase : Dict=256 , **lowercase : List[Any] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(**lowercase )
if text_config is None:
__lowercase = {}
logger.info("""`text_config` is `None`. Initializing the `BlipTextConfig` with default values.""" )
if vision_config is None:
__lowercase = {}
logger.info("""`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.""" )
__lowercase = BlipTextConfig(**lowercase )
__lowercase = BlipVisionConfig(**lowercase )
__lowercase = self.vision_config.hidden_size
__lowercase = projection_dim
__lowercase = logit_scale_init_value
__lowercase = 1.0
__lowercase = 0.02
__lowercase = image_text_hidden_size
@classmethod
def snake_case__ ( cls : str , lowercase : BlipTextConfig , lowercase : BlipVisionConfig , **lowercase : Any ) -> Dict:
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase )
def snake_case__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = copy.deepcopy(self.__dict__ )
__lowercase = self.text_config.to_dict()
__lowercase = self.vision_config.to_dict()
__lowercase = self.__class__.model_type
return output
| 711 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
UpperCamelCase__ = random.Random()
def UpperCAmelCase__ ( lowercase__ , lowercase__=1.0 , lowercase__=None , lowercase__=None ) -> str:
if rng is None:
__lowercase = global_rng
__lowercase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[Any] , lowercase : Tuple , lowercase : Union[str, Any]=7 , lowercase : List[Any]=400 , lowercase : Any=2_000 , lowercase : Optional[int]=24 , lowercase : Any=24 , lowercase : List[str]=0.0 , lowercase : Dict=16_000 , lowercase : Union[str, Any]=True , lowercase : Dict=True , ) -> Any:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = min_seq_length
__lowercase = max_seq_length
__lowercase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__lowercase = feature_size
__lowercase = num_mel_bins
__lowercase = padding_value
__lowercase = sampling_rate
__lowercase = return_attention_mask
__lowercase = do_normalize
def snake_case__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case__ ( self : List[str] , lowercase : Tuple=False , lowercase : int=False ) -> Optional[Any]:
"""simple docstring"""
def _flatten(lowercase : Optional[Any] ):
return list(itertools.chain(*lowercase ) )
if equal_length:
__lowercase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__lowercase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__lowercase = [np.asarray(lowercase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _lowerCAmelCase ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : int = SpeechaTextFeatureExtractor if is_speech_available() else None
def snake_case__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase = SpeechaTextFeatureExtractionTester(self )
def snake_case__ ( self : Tuple , lowercase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
self.assertTrue(np.all(np.mean(lowercase , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowercase , axis=0 ) - 1 ) < 1E-3 ) )
def snake_case__ ( self : List[Any] ) -> str:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = [np.asarray(lowercase ) for speech_input in speech_inputs]
# Test feature size
__lowercase = feature_extractor(lowercase , padding=lowercase , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
__lowercase = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
__lowercase = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-3 ) )
# Test batched
__lowercase = feature_extractor(lowercase , return_tensors="""np""" ).input_features
__lowercase = feature_extractor(lowercase , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(lowercase , lowercase ):
self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__lowercase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__lowercase = np.asarray(lowercase )
__lowercase = feature_extractor(lowercase , return_tensors="""np""" ).input_features
__lowercase = feature_extractor(lowercase , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(lowercase , lowercase ):
self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-3 ) )
def snake_case__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = ["""longest""", """max_length""", """do_not_pad"""]
__lowercase = [None, 16, None]
for max_length, padding in zip(lowercase , lowercase ):
__lowercase = feature_extractor(
lowercase , padding=lowercase , max_length=lowercase , return_attention_mask=lowercase )
__lowercase = inputs.input_features
__lowercase = inputs.attention_mask
__lowercase = [np.sum(lowercase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def snake_case__ ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = ["""longest""", """max_length""", """do_not_pad"""]
__lowercase = [None, 16, None]
for max_length, padding in zip(lowercase , lowercase ):
__lowercase = feature_extractor(
lowercase , max_length=lowercase , padding=lowercase , return_tensors="""np""" , return_attention_mask=lowercase )
__lowercase = inputs.input_features
__lowercase = inputs.attention_mask
__lowercase = [np.sum(lowercase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def snake_case__ ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = feature_extractor(
lowercase , padding="""max_length""" , max_length=4 , truncation=lowercase , return_tensors="""np""" , return_attention_mask=lowercase , )
__lowercase = inputs.input_features
__lowercase = inputs.attention_mask
__lowercase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = feature_extractor(
lowercase , padding="""longest""" , max_length=4 , truncation=lowercase , return_tensors="""np""" , return_attention_mask=lowercase , )
__lowercase = inputs.input_features
__lowercase = inputs.attention_mask
__lowercase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = feature_extractor(
lowercase , padding="""longest""" , max_length=16 , truncation=lowercase , return_tensors="""np""" , return_attention_mask=lowercase , )
__lowercase = inputs.input_features
__lowercase = inputs.attention_mask
__lowercase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
def snake_case__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
import torch
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = np.random.rand(100 , 32 ).astype(np.floataa )
__lowercase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__lowercase = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__lowercase = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def snake_case__ ( self : Optional[int] , lowercase : Union[str, Any] ) -> int:
"""simple docstring"""
from datasets import load_dataset
__lowercase = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
__lowercase = ds.sort("""id""" ).select(range(lowercase ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def snake_case__ ( self : str ) -> Any:
"""simple docstring"""
__lowercase = np.array([
-1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
-1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
-1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
] )
# fmt: on
__lowercase = self._load_datasamples(1 )
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = feature_extractor(lowercase , return_tensors="""pt""" ).input_features
self.assertEquals(input_features.shape , (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] , lowercase , atol=1E-4 ) )
| 634 | 0 |
def UpperCAmelCase__ ( lowercase__ ) -> list[int]:
if length <= 0 or not isinstance(lowercase__ , lowercase__ ):
raise ValueError("""Length must be a positive integer.""" )
return [n * (2 * n - 1) for n in range(lowercase__ )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 712 |
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ ) -> float:
__lowercase = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def UpperCAmelCase__ ( ) -> List[str]:
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 634 | 0 |
import math
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> Optional[Any]:
if 0 not in (x, y):
# We use the relation log10(x^y) = y*log10(x), i.e. compare the powers via their base-10 logs.
return y * math.logaa(lowercase__ )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("""This should never happen""" )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
UpperCamelCase__ = "Enter the base and the power separated by a comma: "
UpperCamelCase__ , UpperCamelCase__ = map(int, input(prompt).split(","))
UpperCamelCase__ , UpperCamelCase__ = map(int, input(prompt).split(","))
# We find the log of each number, using the function res(), which takes two
# arguments.
UpperCamelCase__ = res(xa, ya)
UpperCamelCase__ = res(xa, ya)
# We check for the largest number
if resa > resa:
print("Largest number is", xa, "^", ya)
elif resa > resa:
print("Largest number is", xa, "^", ya)
else:
print("Both are equal")
| 713 |
def UpperCAmelCase__ ( lowercase__ = 100 ) -> int:
__lowercase = n * (n + 1) * (2 * n + 1) / 6
__lowercase = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
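# Hedged usage sketch (editor addition): for n = 10 the square of the sum is 3025 and the sum
# of the squares is 385, so the difference is 2640 (the Project Euler 6 sample value). Assuming
# the obfuscated `__lowercase` assignments resolve to the names read in the return statement,
# a demo could be (the helper name is illustrative):
def _sum_square_difference_demo() -> None:
    print(UpperCAmelCase__(10))  # should print 2640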
if __name__ == "__main__":
print(F"""{solution() = }""")
| 634 | 0 |
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__=5 ) -> Any:
'''simple docstring'''
assert masked_input.count("""<mask>""" ) == 1
__lowercase = torch.tensor(tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ ) ).unsqueeze(0 ) # Batch size 1
__lowercase = model(lowercase__ )[0] # The last hidden-state is the first element of the output tuple
__lowercase = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
__lowercase = logits[0, masked_index, :]
__lowercase = logits.softmax(dim=0 )
__lowercase , __lowercase = prob.topk(k=lowercase__ , dim=0 )
__lowercase = """ """.join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(lowercase__ ) )] )
__lowercase = tokenizer.mask_token
__lowercase = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(""" """ ) ):
__lowercase = predicted_token_bpe.replace("""\u2581""" , """ """ )
if " {0}".format(lowercase__ ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(""" {0}""".format(lowercase__ ) , lowercase__ ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(lowercase__ , lowercase__ ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
UpperCamelCase__ = CamembertTokenizer.from_pretrained("camembert-base")
UpperCamelCase__ = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
UpperCamelCase__ = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 714 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
UpperCamelCase__ = datasets.logging.get_logger(__name__)
UpperCamelCase__ = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
UpperCamelCase__ = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
UpperCamelCase__ = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__=False , lowercase__=False , lowercase__=True , lowercase__=False , lowercase__="dummy_doc" ) -> str:
__lowercase = {doc: key_lines}
__lowercase = {doc: sys_lines}
__lowercase = {}
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase , __lowercase = reader.get_doc_mentions(lowercase__ , key_doc_lines[doc] , lowercase__ )
key_singletons_num += singletons_num
if NP_only or min_span:
__lowercase = reader.set_annotated_parse_trees(lowercase__ , key_doc_lines[doc] , lowercase__ , lowercase__ )
__lowercase , __lowercase = reader.get_doc_mentions(lowercase__ , sys_doc_lines[doc] , lowercase__ )
sys_singletons_num += singletons_num
if NP_only or min_span:
__lowercase = reader.set_annotated_parse_trees(lowercase__ , key_doc_lines[doc] , lowercase__ , lowercase__ )
if remove_nested:
__lowercase , __lowercase = reader.remove_nested_coref_mentions(lowercase__ , lowercase__ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
__lowercase , __lowercase = reader.remove_nested_coref_mentions(lowercase__ , lowercase__ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
__lowercase = reader.get_mention_assignments(lowercase__ , lowercase__ )
__lowercase = reader.get_mention_assignments(lowercase__ , lowercase__ )
__lowercase = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
logger.info(
"""Number of resulting singleton clusters in the key """
F"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
if not keep_singletons:
logger.info(
F"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
"""files, respectively""" )
return doc_coref_infos
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Optional[Any]:
__lowercase = get_coref_infos(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
__lowercase = {}
__lowercase = 0
__lowercase = 0
for name, metric in metrics:
__lowercase , __lowercase , __lowercase = evaluator.evaluate_documents(lowercase__ , lowercase__ , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F"{name}/recall": recall, F"{name}/precision": precision, F"{name}/f1": fa} )
logger.info(
name.ljust(10 ) , F"Recall: {recall * 100:.2f}" , F" Precision: {precision * 100:.2f}" , F" F1: {fa * 100:.2f}" , )
if conll_subparts_num == 3:
__lowercase = (conll / 3) * 100
logger.info(F"CoNLL score: {conll:.2f}" )
output_scores.update({"""conll_score""": conll} )
return output_scores
def UpperCAmelCase__ ( lowercase__ ) -> List[Any]:
__lowercase = False
for line in key_lines:
if not line.startswith("""#""" ):
if len(line.split() ) > 6:
__lowercase = line.split()[5]
if not parse_col == "-":
__lowercase = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def snake_case__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def snake_case__ ( self : Tuple , lowercase : Dict , lowercase : Optional[int] , lowercase : Dict=True , lowercase : List[str]=False , lowercase : int=False , lowercase : Dict=False ) -> str:
"""simple docstring"""
__lowercase = [
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
__lowercase = util.check_gold_parse_annotation(lowercase )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
__lowercase = evaluate(
key_lines=lowercase , sys_lines=lowercase , metrics=lowercase , NP_only=lowercase , remove_nested=lowercase , keep_singletons=lowercase , min_span=lowercase , )
return score
| 634 | 0 |
import random
def UpperCAmelCase__ ( lowercase__ ) -> bool:
__lowercase = num - 1
__lowercase = 0
while s % 2 == 0:
__lowercase = s // 2
t += 1
for _ in range(5 ):
__lowercase = random.randrange(2 , num - 1 )
__lowercase = pow(lowercase__ , lowercase__ , lowercase__ )
if v != 1:
__lowercase = 0
while v != (num - 1):
if i == t - 1:
return False
else:
__lowercase = i + 1
__lowercase = (v**2) % num
return True
def UpperCAmelCase__ ( lowercase__ ) -> bool:
if num < 2:
return False
__lowercase = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(lowercase__ )
def UpperCAmelCase__ ( lowercase__ = 1_024 ) -> int:
while True:
__lowercase = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(lowercase__ ):
return num
if __name__ == "__main__":
UpperCamelCase__ = generate_large_prime()
print(("Prime number:", num))
print(("is_prime_low_num:", is_prime_low_num(num)))
| 715 |
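The snippet above is a name-mangled Miller–Rabin primality test with a low-prime pre-filter and a large-prime generator. For readability, a minimal self-contained sketch of the same test follows; the function and variable names are illustrative and are not those of the dataset row.

import random

def rabin_miller(num: int, rounds: int = 5) -> bool:
    # Probabilistic primality test; assumes num is an odd integer > 3.
    # Write num - 1 as s * 2**t with s odd.
    s, t = num - 1, 0
    while s % 2 == 0:
        s //= 2
        t += 1
    for _ in range(rounds):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)                 # a**s mod num
        if v == 1:
            continue                       # this base shows no evidence of compositeness
        for _ in range(t):
            if v == num - 1:
                break                      # hit -1 mod num: still possibly prime
            v = (v * v) % num
        else:
            return False                   # never hit -1: definitely composite
    return True

print(rabin_miller(1_000_003))  # True: 1000003 is prime
print(rabin_miller(1_000_001))  # almost surely False: 1000001 == 101 * 9901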
UpperCamelCase__ = {
"joule": 1.0,
"kilojoule": 10_00,
"megajoule": 1_00_00_00,
"gigajoule": 10_00_00_00_00,
"wattsecond": 1.0,
"watthour": 36_00,
"kilowatthour": 3_60_00_00,
"newtonmeter": 1.0,
"calorie_nutr": 41_86.8,
"kilocalorie_nutr": 4_18_68_00.00,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 10_55.0_55_85,
"footpound": 1.355_818,
}
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ ) -> float:
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
__lowercase = (
F"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
F"Valid values are: {', '.join(lowercase__ )}"
)
raise ValueError(lowercase__ )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 634 | 0 |
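The energy-converter row above normalises every unit through joules and computes value * factor[from] / factor[to]. A small usage sketch of that pattern, with a trimmed-down factor table chosen purely for illustration:

# Illustrative subset of the factor table; values are joules per unit.
ENERGY_FACTORS = {
    "joule": 1.0,
    "kilojoule": 1_000.0,
    "kilowatthour": 3_600_000.0,
    "calorie_nutr": 4_186.8,
}

def convert_energy(value: float, from_type: str, to_type: str) -> float:
    if from_type not in ENERGY_FACTORS or to_type not in ENERGY_FACTORS:
        raise ValueError(f"Unknown unit: {from_type!r} or {to_type!r}")
    return value * ENERGY_FACTORS[from_type] / ENERGY_FACTORS[to_type]

print(convert_energy(1, "kilowatthour", "kilojoule"))  # 3600.0
print(convert_energy(0.5, "kilowatthour", "joule"))    # 1800000.0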
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def UpperCAmelCase__ ( ) -> List[str]:
__lowercase , __lowercase = 9, 14 # noqa: F841
__lowercase = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
__lowercase = defaultdict(lowercase__ )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
__lowercase = mst(lowercase__ )
__lowercase = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
__lowercase = tuple(answer[:2] )
__lowercase = tuple(edge[::-1] )
assert edge in result or reverse in result
| 716 |
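The test row above builds an undirected weighted graph as an adjacency dict (node -> [[neighbour, cost], ...]) and checks the edges returned by an imported Prim's routine. For context, a self-contained heap-based Prim's sketch over the same adjacency shape could look like the following; the function name and return format are illustrative and need not match the imported implementation.

import heapq
from collections import defaultdict

def prim_mst(adjacency: dict, start: int = 0) -> list:
    # Grow a minimum spanning tree greedily from `start`; returns [u, v, cost] edges.
    visited = {start}
    heap = [(cost, start, v) for v, cost in adjacency[start]]
    heapq.heapify(heap)
    mst_edges = []
    while heap and len(visited) < len(adjacency):
        cost, u, v = heapq.heappop(heap)
        if v in visited:
            continue
        visited.add(v)
        mst_edges.append([u, v, cost])
        for w, c in adjacency[v]:
            if w not in visited:
                heapq.heappush(heap, (c, v, w))
    return mst_edges

# Same graph as the test above.
edges = [[0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2],
         [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9],
         [5, 4, 10], [1, 7, 11]]
adjacency = defaultdict(list)
for node_a, node_b, cost in edges:
    adjacency[node_a].append([node_b, cost])
    adjacency[node_b].append([node_a, cost])
print(prim_mst(adjacency))  # 8 edges with total weight 37 for this graph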
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase )
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : str = field(default="""summarization""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
lowercase__ : ClassVar[Features] = Features({"""text""": Value("""string""" )} )
lowercase__ : ClassVar[Features] = Features({"""summary""": Value("""string""" )} )
lowercase__ : str = "text"
lowercase__ : str = "summary"
@property
def snake_case__ ( self : List[Any] ) -> Dict[str, str]:
"""simple docstring"""
return {self.text_column: "text", self.summary_column: "summary"}
| 634 | 0 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
UpperCamelCase__ = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
UpperCamelCase__ = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
UpperCamelCase__ = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> Optional[Any]:
return float((preds == labels).mean() )
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> str:
__lowercase = simple_accuracy(lowercase__ , lowercase__ )
__lowercase = float(fa_score(y_true=lowercase__ , y_pred=lowercase__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> List[str]:
__lowercase = float(pearsonr(lowercase__ , lowercase__ )[0] )
__lowercase = float(spearmanr(lowercase__ , lowercase__ )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def snake_case__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
def snake_case__ ( self : Tuple , lowercase : Tuple , lowercase : int ) -> str:
"""simple docstring"""
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(lowercase , lowercase )}
elif self.config_name == "stsb":
return pearson_and_spearman(lowercase , lowercase )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(lowercase , lowercase )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(lowercase , lowercase )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
| 717 |
def UpperCAmelCase__ ( lowercase__ ) -> Optional[int]:
__lowercase = len(lowercase__ )
__lowercase = sum(lowercase__ )
__lowercase = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
__lowercase = True
for i in range(1 , s + 1 ):
__lowercase = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
__lowercase = dp[i][j - 1]
if arr[i - 1] <= j:
__lowercase = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
__lowercase = s - 2 * j
break
return diff
| 634 | 0 |
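The row above is the classic minimum subset-sum-difference DP: mark which sums are reachable by some subset, then pick the reachable sum closest to half the total. A compact one-dimensional sketch of the same recurrence, with illustrative names:

def min_partition_difference(arr: list) -> int:
    # Smallest possible |sum(A) - sum(B)| over all two-way splits of arr.
    total = sum(arr)
    reachable = [False] * (total + 1)
    reachable[0] = True
    for value in arr:
        # Iterate sums downwards so each element is used at most once.
        for j in range(total, value - 1, -1):
            if reachable[j - value]:
                reachable[j] = True
    best = max(j for j in range(total // 2 + 1) if reachable[j])
    return total - 2 * best

print(min_partition_difference([1, 6, 11, 5]))  # 1, e.g. {1, 5, 6} vs {11}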
def UpperCAmelCase__ ( lowercase__ = 1_000_000 ) -> int:
__lowercase = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , lowercase__ ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
| 718 |
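The row above sieves Euler's totient φ up to a limit and sums φ(2)..φ(limit), which counts the reduced proper fractions with denominator at most that limit. A short illustrative sieve of the same kind, using the standard φ(n) = n initialisation rather than the row's n - 1 variant:

def totient_sieve(limit: int) -> list:
    # phi[n] = how many 1 <= k <= n are coprime to n, for every n <= limit.
    phi = list(range(limit + 1))            # start with phi[n] = n
    for p in range(2, limit + 1):
        if phi[p] == p:                     # p is prime: still untouched
            for multiple in range(p, limit + 1, p):
                phi[multiple] -= phi[multiple] // p   # multiply by (1 - 1/p)
    return phi

phi = totient_sieve(10)
print(phi[1:])        # [1, 1, 2, 2, 4, 2, 6, 4, 6, 4]
print(sum(phi[2:]))   # 31 reduced proper fractions with denominator <= 10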
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , lowercase : str , lowercase : Union[str, Any]=13 , lowercase : Tuple=32 , lowercase : Optional[Any]=2 , lowercase : Tuple=3 , lowercase : Tuple=16 , lowercase : Tuple=[1, 2, 1] , lowercase : Optional[Any]=[2, 2, 4] , lowercase : Dict=2 , lowercase : Optional[int]=2.0 , lowercase : List[Any]=True , lowercase : str=0.0 , lowercase : Any=0.0 , lowercase : Optional[int]=0.1 , lowercase : int="gelu" , lowercase : Tuple=False , lowercase : Optional[Any]=True , lowercase : int=0.02 , lowercase : Union[str, Any]=1E-5 , lowercase : Dict=True , lowercase : Any=None , lowercase : str=True , lowercase : str=10 , lowercase : Dict=8 , lowercase : int=["stage1", "stage2", "stage3"] , lowercase : Optional[int]=[1, 2, 3] , ) -> Any:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = embed_dim
__lowercase = depths
__lowercase = num_heads
__lowercase = window_size
__lowercase = mlp_ratio
__lowercase = qkv_bias
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = drop_path_rate
__lowercase = hidden_act
__lowercase = use_absolute_embeddings
__lowercase = patch_norm
__lowercase = layer_norm_eps
__lowercase = initializer_range
__lowercase = is_training
__lowercase = scope
__lowercase = use_labels
__lowercase = type_sequence_label_size
__lowercase = encoder_stride
__lowercase = out_features
__lowercase = out_indices
def snake_case__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self : List[str] ) -> int:
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def snake_case__ ( self : Any , lowercase : List[Any] , lowercase : Optional[int] , lowercase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = MaskFormerSwinModel(config=lowercase )
model.to(lowercase )
model.eval()
__lowercase = model(lowercase )
__lowercase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__lowercase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def snake_case__ ( self : Any , lowercase : Tuple , lowercase : Any , lowercase : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowercase = MaskFormerSwinBackbone(config=lowercase )
model.to(lowercase )
model.eval()
__lowercase = model(lowercase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(lowercase ):
__lowercase = ["""stem"""]
__lowercase = MaskFormerSwinBackbone(config=lowercase )
def snake_case__ ( self : int ) -> Any:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : Optional[int] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowercase__ : List[str] = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
lowercase__ : List[str] = False
lowercase__ : int = False
lowercase__ : int = False
lowercase__ : Tuple = False
lowercase__ : Optional[Any] = False
def snake_case__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = MaskFormerSwinModelTester(self )
__lowercase = ConfigTester(self , config_class=lowercase , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def snake_case__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowercase )
@unittest.skip("""Swin does not use inputs_embeds""" )
def snake_case__ ( self : int ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def snake_case__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) )
def snake_case__ ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def snake_case__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple , lowercase : Tuple , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase , lowercase ) )
__lowercase = outputs.hidden_states
__lowercase = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowercase ) , lowercase )
# Swin has a different seq_length
__lowercase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def snake_case__ ( self : int ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , lowercase )
def snake_case__ ( self : int ) -> str:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = 3
__lowercase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__lowercase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowercase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__lowercase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def snake_case__ ( self : Any ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def snake_case__ ( self : List[str] ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def snake_case__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(lowercase : Optional[int] ):
__lowercase = 0
return t
def check_equivalence(lowercase : Optional[int] , lowercase : str , lowercase : str , lowercase : Tuple={} ):
with torch.no_grad():
__lowercase = model(**lowercase , return_dict=lowercase , **lowercase )
__lowercase = model(**lowercase , return_dict=lowercase , **lowercase ).to_tuple()
def recursive_check(lowercase : int , lowercase : Optional[Any] ):
if isinstance(lowercase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowercase , lowercase ):
recursive_check(lowercase , lowercase )
elif isinstance(lowercase , lowercase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(lowercase , lowercase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(lowercase ) , set_nan_tensor_to_zero(lowercase ) , atol=1E-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
F" {torch.isnan(lowercase ).any()} and `inf`: {torch.isinf(lowercase )}. Dict has"
F" `nan`: {torch.isnan(lowercase ).any()} and `inf`: {torch.isinf(lowercase )}."
) , )
recursive_check(lowercase , lowercase )
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
model.to(lowercase )
model.eval()
__lowercase = self._prepare_for_class(lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase , {"""output_hidden_states""": True} )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase , {"""output_hidden_states""": True} )
@require_torch
class _lowerCAmelCase ( unittest.TestCase , _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : List[str] = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowercase__ : Any = MaskFormerSwinConfig
def snake_case__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = MaskFormerSwinModelTester(self )
def snake_case__ ( self : Any ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
__lowercase = backbone_class(lowercase )
backbone.to(lowercase )
backbone.eval()
__lowercase = backbone(**lowercase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , lowercase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
__lowercase = backbone(**lowercase , output_hidden_states=lowercase )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__lowercase , __lowercase , __lowercase = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
__lowercase = backbone(**lowercase , output_attentions=lowercase )
self.assertIsNotNone(outputs.attentions )
| 634 | 0 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
UpperCamelCase__ = Mapping[str, np.ndarray]
UpperCamelCase__ = Mapping[str, Any] # Is a nested dict.
UpperCamelCase__ = 0.01
@dataclasses.dataclass(frozen=_UpperCAmelCase )
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
lowercase__ : np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
lowercase__ : np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
lowercase__ : np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
lowercase__ : np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
lowercase__ : Optional[np.ndarray] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
lowercase__ : Optional[str] = None
# Templates used to generate this protein (prediction-only)
lowercase__ : Optional[Sequence[str]] = None
# Chain corresponding to each parent
lowercase__ : Optional[Sequence[int]] = None
def UpperCAmelCase__ ( lowercase__ ) -> Protein:
__lowercase = r"""(\[[A-Z]+\]\n)"""
__lowercase = [tag.strip() for tag in re.split(lowercase__ , lowercase__ ) if len(lowercase__ ) > 0]
__lowercase = zip(tags[0::2] , [l.split("""\n""" ) for l in tags[1::2]] )
__lowercase = ["""N""", """CA""", """C"""]
__lowercase = None
__lowercase = None
__lowercase = None
for g in groups:
if "[PRIMARY]" == g[0]:
__lowercase = g[1][0].strip()
for i in range(len(lowercase__ ) ):
if seq[i] not in residue_constants.restypes:
__lowercase = """X""" # FIXME: strings are immutable
__lowercase = np.array(
[residue_constants.restype_order.get(lowercase__ , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
__lowercase = []
for axis in range(3 ):
tertiary.append(list(map(lowercase__ , g[1][axis].split() ) ) )
__lowercase = np.array(lowercase__ )
__lowercase = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(lowercase__ ):
__lowercase = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
__lowercase = np.array(list(map({"""-""": 0, """+""": 1}.get , g[1][0].strip() ) ) )
__lowercase = np.zeros(
(
len(lowercase__ ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(lowercase__ ):
__lowercase = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=lowercase__ , atom_mask=lowercase__ , aatype=lowercase__ , residue_index=np.arange(len(lowercase__ ) ) , b_factors=lowercase__ , )
def UpperCAmelCase__ ( lowercase__ , lowercase__ = 0 ) -> List[str]:
__lowercase = []
__lowercase = prot.remark
if remark is not None:
pdb_headers.append(F"REMARK {remark}" )
__lowercase = prot.parents
__lowercase = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
__lowercase = [p for i, p in zip(lowercase__ , lowercase__ ) if i == chain_id]
if parents is None or len(lowercase__ ) == 0:
__lowercase = ["""N/A"""]
pdb_headers.append(F"PARENT {' '.join(lowercase__ )}" )
return pdb_headers
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> str:
__lowercase = []
__lowercase = pdb_str.split("""\n""" )
__lowercase = prot.remark
if remark is not None:
out_pdb_lines.append(F"REMARK {remark}" )
__lowercase = 42
if prot.parents is not None and len(prot.parents ) > 0:
__lowercase = []
if prot.parents_chain_index is not None:
__lowercase = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(lowercase__ ) , [] )
parent_dict[str(lowercase__ )].append(lowercase__ )
__lowercase = max([int(lowercase__ ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
__lowercase = parent_dict.get(str(lowercase__ ) , ["""N/A"""] )
parents_per_chain.append(lowercase__ )
else:
parents_per_chain.append(list(prot.parents ) )
else:
__lowercase = [["""N/A"""]]
def make_parent_line(lowercase__ ) -> str:
return F"PARENT {' '.join(lowercase__ )}"
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
__lowercase = 0
for i, l in enumerate(lowercase__ ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(lowercase__ )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(lowercase__ ):
__lowercase = parents_per_chain[chain_counter]
else:
__lowercase = ["""N/A"""]
out_pdb_lines.append(make_parent_line(lowercase__ ) )
return "\n".join(lowercase__ )
def UpperCAmelCase__ ( lowercase__ ) -> str:
__lowercase = residue_constants.restypes + ["""X"""]
def res_atoa(lowercase__ ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , """UNK""" )
__lowercase = residue_constants.atom_types
__lowercase = []
__lowercase = prot.atom_mask
__lowercase = prot.aatype
__lowercase = prot.atom_positions
__lowercase = prot.residue_index.astype(np.intaa )
__lowercase = prot.b_factors
__lowercase = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("""Invalid aatypes.""" )
__lowercase = get_pdb_headers(lowercase__ )
if len(lowercase__ ) > 0:
pdb_lines.extend(lowercase__ )
__lowercase = aatype.shape[0]
__lowercase = 1
__lowercase = 0
__lowercase = string.ascii_uppercase
__lowercase = None
# Add all atom sites.
for i in range(lowercase__ ):
__lowercase = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(lowercase__ , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
__lowercase = """ATOM"""
__lowercase = atom_name if len(lowercase__ ) == 4 else F" {atom_name}"
__lowercase = """"""
__lowercase = """"""
__lowercase = 1.00
__lowercase = atom_name[0] # Protein supports only C, N, O, S, this works.
__lowercase = """"""
__lowercase = """A"""
if chain_index is not None:
__lowercase = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
__lowercase = (
F"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
F"{res_name_a:>3} {chain_tag:>1}"
F"{residue_index[i]:>4}{insertion_code:>1} "
F"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
F"{occupancy:>6.2f}{b_factor:>6.2f} "
F"{element:>2}{charge:>2}"
)
pdb_lines.append(lowercase__ )
atom_index += 1
__lowercase = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
__lowercase = True
__lowercase = chain_index[i + 1]
if should_terminate:
# Close the chain.
__lowercase = """TER"""
__lowercase = (
F"{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"
)
pdb_lines.append(lowercase__ )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(lowercase__ , lowercase__ ) )
pdb_lines.append("""END""" )
pdb_lines.append("""""" )
return "\n".join(lowercase__ )
def UpperCAmelCase__ ( lowercase__ ) -> np.ndarray:
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , ) -> Protein:
return Protein(
aatype=features["""aatype"""] , atom_positions=result["""final_atom_positions"""] , atom_mask=result["""final_atom_mask"""] , residue_index=features["""residue_index"""] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["""final_atom_mask"""] ) , chain_index=lowercase__ , remark=lowercase__ , parents=lowercase__ , parents_chain_index=lowercase__ , )
| 719 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
UpperCamelCase__ = "scheduler_config.json"
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : List[Any] = 1
lowercase__ : Tuple = 2
lowercase__ : Union[str, Any] = 3
lowercase__ : Union[str, Any] = 4
lowercase__ : str = 5
lowercase__ : Any = 6
lowercase__ : Any = 7
lowercase__ : List[str] = 8
lowercase__ : Union[str, Any] = 9
lowercase__ : int = 10
lowercase__ : List[str] = 11
lowercase__ : List[Any] = 12
lowercase__ : str = 13
lowercase__ : Optional[int] = 14
@dataclass
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : torch.FloatTensor
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : Optional[int] = SCHEDULER_CONFIG_NAME
lowercase__ : int = []
lowercase__ : Dict = True
@classmethod
def snake_case__ ( cls : str , lowercase : Dict[str, Any] = None , lowercase : Optional[str] = None , lowercase : Any=False , **lowercase : List[str] , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase , __lowercase , __lowercase = cls.load_config(
pretrained_model_name_or_path=lowercase , subfolder=lowercase , return_unused_kwargs=lowercase , return_commit_hash=lowercase , **lowercase , )
return cls.from_config(lowercase , return_unused_kwargs=lowercase , **lowercase )
def snake_case__ ( self : Dict , lowercase : Union[str, os.PathLike] , lowercase : bool = False , **lowercase : List[str] ) -> Optional[Any]:
"""simple docstring"""
self.save_config(save_directory=lowercase , push_to_hub=lowercase , **lowercase )
@property
def snake_case__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return self._get_compatibles()
@classmethod
def snake_case__ ( cls : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = list(set([cls.__name__] + cls._compatibles ) )
__lowercase = importlib.import_module(__name__.split(""".""" )[0] )
__lowercase = [
getattr(lowercase , lowercase ) for c in compatible_classes_str if hasattr(lowercase , lowercase )
]
return compatible_classes
| 634 | 0 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , lowercase : str , lowercase : Union[str, Any]=13 , lowercase : Tuple=32 , lowercase : Optional[Any]=2 , lowercase : Tuple=3 , lowercase : Tuple=16 , lowercase : Tuple=[1, 2, 1] , lowercase : Optional[Any]=[2, 2, 4] , lowercase : Dict=2 , lowercase : Optional[int]=2.0 , lowercase : List[Any]=True , lowercase : str=0.0 , lowercase : Any=0.0 , lowercase : Optional[int]=0.1 , lowercase : int="gelu" , lowercase : Tuple=False , lowercase : Optional[Any]=True , lowercase : int=0.02 , lowercase : Union[str, Any]=1E-5 , lowercase : Dict=True , lowercase : Any=None , lowercase : str=True , lowercase : str=10 , lowercase : Dict=8 , lowercase : int=["stage1", "stage2", "stage3"] , lowercase : Optional[int]=[1, 2, 3] , ) -> Any:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = embed_dim
__lowercase = depths
__lowercase = num_heads
__lowercase = window_size
__lowercase = mlp_ratio
__lowercase = qkv_bias
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = drop_path_rate
__lowercase = hidden_act
__lowercase = use_absolute_embeddings
__lowercase = patch_norm
__lowercase = layer_norm_eps
__lowercase = initializer_range
__lowercase = is_training
__lowercase = scope
__lowercase = use_labels
__lowercase = type_sequence_label_size
__lowercase = encoder_stride
__lowercase = out_features
__lowercase = out_indices
def snake_case__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self : List[str] ) -> int:
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def snake_case__ ( self : Any , lowercase : List[Any] , lowercase : Optional[int] , lowercase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = MaskFormerSwinModel(config=lowercase )
model.to(lowercase )
model.eval()
__lowercase = model(lowercase )
__lowercase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__lowercase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def snake_case__ ( self : Any , lowercase : Tuple , lowercase : Any , lowercase : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowercase = MaskFormerSwinBackbone(config=lowercase )
model.to(lowercase )
model.eval()
__lowercase = model(lowercase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(lowercase ):
__lowercase = ["""stem"""]
__lowercase = MaskFormerSwinBackbone(config=lowercase )
def snake_case__ ( self : int ) -> Any:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : Optional[int] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowercase__ : List[str] = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
lowercase__ : List[str] = False
lowercase__ : int = False
lowercase__ : int = False
lowercase__ : Tuple = False
lowercase__ : Optional[Any] = False
def snake_case__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = MaskFormerSwinModelTester(self )
__lowercase = ConfigTester(self , config_class=lowercase , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def snake_case__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowercase )
@unittest.skip("""Swin does not use inputs_embeds""" )
def snake_case__ ( self : int ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def snake_case__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) )
def snake_case__ ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def snake_case__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple , lowercase : Tuple , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase , lowercase ) )
__lowercase = outputs.hidden_states
__lowercase = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowercase ) , lowercase )
# Swin has a different seq_length
__lowercase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def snake_case__ ( self : int ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , lowercase )
def snake_case__ ( self : int ) -> str:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = 3
__lowercase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__lowercase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowercase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__lowercase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def snake_case__ ( self : Any ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def snake_case__ ( self : List[str] ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def snake_case__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(lowercase : Optional[int] ):
__lowercase = 0
return t
def check_equivalence(lowercase : Optional[int] , lowercase : str , lowercase : str , lowercase : Tuple={} ):
with torch.no_grad():
__lowercase = model(**lowercase , return_dict=lowercase , **lowercase )
__lowercase = model(**lowercase , return_dict=lowercase , **lowercase ).to_tuple()
def recursive_check(lowercase : int , lowercase : Optional[Any] ):
if isinstance(lowercase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowercase , lowercase ):
recursive_check(lowercase , lowercase )
elif isinstance(lowercase , lowercase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(lowercase , lowercase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(lowercase ) , set_nan_tensor_to_zero(lowercase ) , atol=1E-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
F" {torch.isnan(lowercase ).any()} and `inf`: {torch.isinf(lowercase )}. Dict has"
F" `nan`: {torch.isnan(lowercase ).any()} and `inf`: {torch.isinf(lowercase )}."
) , )
recursive_check(lowercase , lowercase )
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
model.to(lowercase )
model.eval()
__lowercase = self._prepare_for_class(lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase , {"""output_hidden_states""": True} )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase , {"""output_hidden_states""": True} )
@require_torch
class _lowerCAmelCase ( unittest.TestCase , _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : List[str] = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowercase__ : Any = MaskFormerSwinConfig
def snake_case__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = MaskFormerSwinModelTester(self )
def snake_case__ ( self : Any ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
__lowercase = backbone_class(lowercase )
backbone.to(lowercase )
backbone.eval()
__lowercase = backbone(**lowercase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , lowercase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
__lowercase = backbone(**lowercase , output_hidden_states=lowercase )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__lowercase , __lowercase , __lowercase = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
__lowercase = backbone(**lowercase , output_attentions=lowercase )
self.assertIsNotNone(outputs.attentions )
| 720 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Union[str, Any] = """sew"""
def __init__( self : List[Any] , lowercase : int=32 , lowercase : List[str]=768 , lowercase : Dict=12 , lowercase : str=12 , lowercase : str=3_072 , lowercase : Optional[int]=2 , lowercase : List[str]="gelu" , lowercase : List[str]=0.1 , lowercase : Tuple=0.1 , lowercase : Dict=0.1 , lowercase : Any=0.0 , lowercase : Dict=0.1 , lowercase : Optional[int]=0.1 , lowercase : List[str]=0.02 , lowercase : Dict=1E-5 , lowercase : Tuple="group" , lowercase : int="gelu" , lowercase : Any=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowercase : Optional[int]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase : Any=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase : List[str]=False , lowercase : Tuple=128 , lowercase : int=16 , lowercase : Union[str, Any]=True , lowercase : List[str]=0.05 , lowercase : Optional[int]=10 , lowercase : Any=2 , lowercase : Optional[Any]=0.0 , lowercase : Optional[Any]=10 , lowercase : int=0 , lowercase : Optional[int]="mean" , lowercase : List[Any]=False , lowercase : str=False , lowercase : int=256 , lowercase : str=0 , lowercase : List[Any]=1 , lowercase : List[Any]=2 , **lowercase : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase )
__lowercase = hidden_size
__lowercase = feat_extract_norm
__lowercase = feat_extract_activation
__lowercase = list(lowercase )
__lowercase = list(lowercase )
__lowercase = list(lowercase )
__lowercase = conv_bias
__lowercase = num_conv_pos_embeddings
__lowercase = num_conv_pos_embedding_groups
__lowercase = len(self.conv_dim )
__lowercase = num_hidden_layers
__lowercase = intermediate_size
__lowercase = squeeze_factor
__lowercase = hidden_act
__lowercase = num_attention_heads
__lowercase = hidden_dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = feat_proj_dropout
__lowercase = final_dropout
__lowercase = layerdrop
__lowercase = layer_norm_eps
__lowercase = initializer_range
__lowercase = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
F"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"
F"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowercase = apply_spec_augment
__lowercase = mask_time_prob
__lowercase = mask_time_length
__lowercase = mask_time_min_masks
__lowercase = mask_feature_prob
__lowercase = mask_feature_length
__lowercase = mask_feature_min_masks
# ctc loss
__lowercase = ctc_loss_reduction
__lowercase = ctc_zero_infinity
# sequence classification
__lowercase = use_weighted_layer_sum
__lowercase = classifier_proj_size
@property
def snake_case__ ( self : Dict ) -> str:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 634 | 0 |
UpperCamelCase__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def UpperCAmelCase__ ( lowercase__ ) -> int:
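# Dijkstra's two-stack evaluation: operands and operators are pushed onto separate
# stacks while scanning a fully parenthesised infix expression; every ")" pops one
# operator and two operands and pushes the reduced result back onto the operand stack.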
__lowercase = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub}
__lowercase = Stack()
__lowercase = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(lowercase__ ) )
elif i in operators:
# RULE 2
operator_stack.push(lowercase__ )
elif i == ")":
# RULE 4
__lowercase = operator_stack.peek()
operator_stack.pop()
__lowercase = operand_stack.peek()
operand_stack.pop()
__lowercase = operand_stack.peek()
operand_stack.pop()
__lowercase = operators[opr](lowercase__ , lowercase__ )
operand_stack.push(lowercase__ )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
UpperCamelCase__ = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 721 |
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class _lowerCAmelCase ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : int = WavaVecaPhonemeCTCTokenizer
lowercase__ : Optional[int] = False
def snake_case__ ( self : str ) -> int:
"""simple docstring"""
super().setUp()
__lowercase = (
"""<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː """
"""ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː """
"""ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 """
"""oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ """
"""pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ """
"""yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ """
"""əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ """
"""ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ """
"""ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ """
"""uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ """
"""ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ """
"""ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ """
"""ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"""
).split(""" """ )
__lowercase = dict(zip(lowercase , range(len(lowercase ) ) ) )
__lowercase = {"""pad_token""": """<pad>""", """unk_token""": """<unk>""", """bos_token""": """<s>""", """eos_token""": """</s>"""}
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowercase ) + """\n""" )
def snake_case__ ( self : List[Any] , lowercase : Optional[Any] , lowercase : List[str]=False , lowercase : List[str]=20 , lowercase : str=5 ) -> Tuple[str, list]:
"""simple docstring"""
__lowercase = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase )) for i in range(len(lowercase ) )]
__lowercase = list(filter(lambda lowercase : [t[0]] == tokenizer.encode(t[1] , do_phonemize=lowercase ) , lowercase ) )
if max_length is not None and len(lowercase ) > max_length:
__lowercase = toks[:max_length]
if min_length is not None and len(lowercase ) < min_length and len(lowercase ) > 0:
while len(lowercase ) < min_length:
__lowercase = toks + toks
# toks_str = [t[1] for t in toks]
__lowercase = [t[0] for t in toks]
# Ensure consistency
__lowercase = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase )
if " " not in output_txt and len(lowercase ) > 1:
__lowercase = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase )
)
if with_prefix_space:
__lowercase = """ """ + output_txt
__lowercase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
return output_txt, output_ids
def snake_case__ ( self : Tuple , **lowercase : int ) -> Union[str, Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **lowercase )
def snake_case__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
# check adding a single token
tokenizer.add_tokens("""xxx""" )
__lowercase = tokenizer("""m xxx ɪ""" , do_phonemize=lowercase ).input_ids
self.assertEqual(lowercase , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(["""aaa""", """bbb""", """ccc"""] )
__lowercase = tokenizer("""m aaa ɪ ccc""" , do_phonemize=lowercase ).input_ids
self.assertEqual(lowercase , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
__lowercase = tokenizer("""maɪ c""" , do_phonemize=lowercase ).input_ids
self.assertEqual(lowercase , [3, 200] ) # mai should be <unk> (=3)
def snake_case__ ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
self.assertEqual(lowercase , """h ə l oʊ h aʊ ɑːɹ j uː""" )
def snake_case__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(lowercase ).input_ids , tokenizer(lowercase , do_phonemize=lowercase ).input_ids )
def snake_case__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
__lowercase = tokenizer.decode(tokenizer(lowercase ).input_ids )
self.assertEqual(lowercase , lowercase )
def snake_case__ ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
__lowercase = tokenizer.decode(sample_ids[0] )
__lowercase = tokenizer.batch_decode(lowercase )
self.assertEqual(lowercase , batch_tokens[0] )
self.assertEqual(lowercase , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
def snake_case__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
self.assertEqual(lowercase , """h ə l oʊ | h aʊ | ɑːɹ | j uː |""" )
def snake_case__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(lowercase ).input_ids , tokenizer(lowercase , do_phonemize=lowercase ).input_ids )
def snake_case__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
__lowercase = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
__lowercase = tokenizer.decode(sample_ids[0] )
__lowercase = tokenizer.batch_decode(lowercase )
self.assertEqual(lowercase , batch_tokens[0] )
self.assertEqual(lowercase , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
# decode with no word_del_token filter
__lowercase = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=lowercase )
__lowercase = tokenizer.batch_decode(lowercase , filter_word_delimiter_token=lowercase )
self.assertEqual(lowercase , batch_tokens[0] )
self.assertEqual(lowercase , ["""k s ɾ | ɾ l | ɭʲ""", """| j ð | s j ð s oːɹ"""] )
def snake_case__ ( self : int ) -> str:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
__lowercase = tokenizer.decode(tokenizer(lowercase ).input_ids , filter_word_delimiter_token=lowercase )
self.assertEqual(lowercase , lowercase )
def snake_case__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
__lowercase = tokenizer.decode(tokenizer(lowercase ).input_ids , filter_word_delimiter_token=lowercase )
self.assertEqual(""" """.join([p.strip() for p in phonemes.split(""" |""" )] ).strip() , lowercase )
def snake_case__ ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token=lowercase )
__lowercase = """Hello how are you"""
__lowercase = tokenizer(lowercase , phonemizer_lang="""en-us""" ).input_ids
__lowercase = tokenizer(lowercase , phonemizer_lang="""fr-fr""" ).input_ids
self.assertNotEqual(lowercase , lowercase )
__lowercase = tokenizer.decode(lowercase )
__lowercase = tokenizer.decode(lowercase )
self.assertEqual(lowercase , """h ə l oʊ h aʊ ɑːɹ j uː""" )
self.assertEqual(lowercase , """ɛ l o h aʊ a ʁ j u""" )
def snake_case__ ( self : int ) -> int:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = """Hello how Are you"""
__lowercase = """hello how are you"""
__lowercase = tokenizer(lowercase ).input_ids
__lowercase = tokenizer(lowercase ).input_ids
self.assertEqual(lowercase , lowercase )
def snake_case__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
tokenizer.add_tokens(["""!""", """?"""] )
tokenizer.add_special_tokens({"""cls_token""": """$$$"""} )
# fmt: off
__lowercase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
__lowercase = tokenizer.batch_decode(lowercase )
self.assertEqual(lowercase , ["""k s ɾ ɾ l ɭʲ!?!? $$$""", """j ð s j ð s oːɹ $$$"""] )
@staticmethod
def snake_case__ ( lowercase : List[str] , lowercase : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase = [d[key] for d in offsets]
return retrieved_list
def snake_case__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.get_tokenizer(word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
__lowercase = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
__lowercase = tokenizer.decode(lowercase , output_char_offsets=lowercase , filter_word_delimiter_token=lowercase )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""char_offsets""" in outputs )
self.assertTrue(isinstance(lowercase , lowercase ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) , ["""k""", """s""", """ɾ""", """ɾ""", """|""", """ɾ""", """l""", """|""", """ɭʲ"""] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """start_offset""" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """end_offset""" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def snake_case__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase = self.get_tokenizer(word_delimiter_token="""|""" )
def check_list_tuples_equal(lowercase : List[str] , lowercase : Dict ):
self.assertTrue(isinstance(lowercase , lowercase ) )
self.assertTrue(isinstance(outputs_list[0] , lowercase ) )
# transform list to ModelOutput
__lowercase = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch["""text"""] , outputs_batch_a["""text"""] )
def recursive_check(lowercase : List[Any] , lowercase : Optional[int] ):
if isinstance(lowercase , lowercase ):
[recursive_check(lowercase , lowercase ) for la, la in zip(lowercase , lowercase )]
self.assertEqual(lowercase , lowercase )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["""char_offsets"""] , outputs_batch_a["""char_offsets"""] )
# fmt: off
__lowercase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
__lowercase = tokenizer.batch_decode(lowercase , output_char_offsets=lowercase )
__lowercase = [tokenizer.decode(lowercase , output_char_offsets=lowercase ) for ids in sample_ids]
check_list_tuples_equal(lowercase , lowercase )
@unittest.skip("""Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes""" )
def snake_case__ ( self : List[Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Wav2Vec2PhonemeTokenizer always puts spaces between phonemes""" )
def snake_case__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip("""encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency""" )
def snake_case__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Wav2Vec2PhonemeModel has no max model length => no testing""" )
def snake_case__ ( self : str ) -> int:
"""simple docstring"""
pass
def snake_case__ ( self : Any ) -> int:
"""simple docstring"""
__lowercase = self.get_tokenizers(do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
__lowercase = tokenizer.vocab_size
__lowercase = len(lowercase )
self.assertNotEqual(lowercase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__lowercase = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
__lowercase = tokenizer.add_tokens(lowercase )
__lowercase = tokenizer.vocab_size
__lowercase = len(lowercase )
self.assertNotEqual(lowercase , 0 )
self.assertEqual(lowercase , lowercase )
self.assertEqual(lowercase , len(lowercase ) )
self.assertEqual(lowercase , all_size + len(lowercase ) )
__lowercase = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=lowercase )
self.assertGreaterEqual(len(lowercase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
__lowercase = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
__lowercase = tokenizer.add_special_tokens(lowercase )
__lowercase = tokenizer.vocab_size
__lowercase = len(lowercase )
self.assertNotEqual(lowercase , 0 )
self.assertEqual(lowercase , lowercase )
self.assertEqual(lowercase , len(lowercase ) )
self.assertEqual(lowercase , all_size_a + len(lowercase ) )
__lowercase = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=lowercase )
self.assertGreaterEqual(len(lowercase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def snake_case__ ( self : str ) -> int:
"""simple docstring"""
pass
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def snake_case__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
def snake_case__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.get_tokenizers(fast=lowercase , do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
__lowercase = ["""ð""", """ɪ""", """s""", """ɪ""", """z""", """ɐ""", """t""", """ɛ""", """k""", """s""", """t"""]
__lowercase = tokenizer.convert_tokens_to_string(lowercase )
self.assertIsInstance(output["""text"""] , lowercase )
| 634 | 0 |
from __future__ import annotations
def UpperCAmelCase__ ( lowercase__ ) -> list:
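# Bucket sort: spread values into one bucket per unit of the [min, max] range,
# then concatenate the individually sorted buckets.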
if len(lowercase__ ) == 0:
return []
__lowercase , __lowercase = min(lowercase__ ), max(lowercase__ )
__lowercase = int(max_value - min_value ) + 1
__lowercase = [[] for _ in range(lowercase__ )]
for i in my_list:
buckets[int(i - min_value )].append(lowercase__ )
return [v for bucket in buckets for v in sorted(lowercase__ )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 700 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
# TODO Update this
UpperCamelCase__ = {
"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Union[str, Any] = """esm"""
def __init__( self : Any , lowercase : Optional[Any]=None , lowercase : Optional[int]=None , lowercase : List[Any]=None , lowercase : Optional[int]=768 , lowercase : str=12 , lowercase : Union[str, Any]=12 , lowercase : Dict=3_072 , lowercase : Optional[int]=0.1 , lowercase : str=0.1 , lowercase : Dict=1_026 , lowercase : Tuple=0.02 , lowercase : str=1E-1_2 , lowercase : Dict="absolute" , lowercase : Optional[Any]=True , lowercase : int=None , lowercase : int=False , lowercase : List[str]=False , lowercase : Tuple=None , lowercase : Tuple=None , **lowercase : Union[str, Any] , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=lowercase , mask_token_id=lowercase , **lowercase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
__lowercase = use_cache
__lowercase = emb_layer_norm_before
__lowercase = token_dropout
__lowercase = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
__lowercase = EsmFoldConfig()
elif isinstance(lowercase , lowercase ):
__lowercase = EsmFoldConfig(**lowercase )
__lowercase = esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
__lowercase = get_default_vocab_list()
else:
__lowercase = vocab_list
else:
__lowercase = None
__lowercase = None
if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , lowercase ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def snake_case__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = super().to_dict()
if isinstance(self.esmfold_config , lowercase ):
__lowercase = self.esmfold_config.to_dict()
return output
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : str = None
lowercase__ : bool = True
lowercase__ : bool = False
lowercase__ : bool = False
lowercase__ : bool = False
lowercase__ : float = 0
lowercase__ : bool = True
lowercase__ : bool = False
lowercase__ : int = 128
lowercase__ : "TrunkConfig" = None
def snake_case__ ( self : List[str] ) -> Any:
"""simple docstring"""
if self.trunk is None:
__lowercase = TrunkConfig()
elif isinstance(self.trunk , lowercase ):
__lowercase = TrunkConfig(**self.trunk )
def snake_case__ ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase = asdict(self )
__lowercase = self.trunk.to_dict()
return output
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : int = 48
lowercase__ : int = 1_024
lowercase__ : int = 128
lowercase__ : int = 32
lowercase__ : int = 32
lowercase__ : int = 32
lowercase__ : float = 0
lowercase__ : float = 0
lowercase__ : bool = False
lowercase__ : int = 4
lowercase__ : Optional[int] = 128
lowercase__ : "StructureModuleConfig" = None
def snake_case__ ( self : Tuple ) -> str:
"""simple docstring"""
if self.structure_module is None:
__lowercase = StructureModuleConfig()
elif isinstance(self.structure_module , lowercase ):
__lowercase = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"`max_recycles` should be positive, got {self.max_recycles}." )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
"""`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
F" {self.sequence_state_dim} and {self.sequence_head_width}." )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
"""`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
F" {self.pairwise_state_dim} and {self.pairwise_head_width}." )
__lowercase = self.sequence_state_dim // self.sequence_head_width
__lowercase = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
F" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
F" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
if self.dropout >= 0.4:
raise ValueError(F"`dropout` should not be greater than 0.4, got {self.dropout}." )
def snake_case__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = asdict(self )
__lowercase = self.structure_module.to_dict()
return output
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : int = 384
lowercase__ : int = 128
lowercase__ : int = 16
lowercase__ : int = 128
lowercase__ : int = 12
lowercase__ : int = 4
lowercase__ : int = 8
lowercase__ : float = 0.1
lowercase__ : int = 8
lowercase__ : int = 1
lowercase__ : int = 2
lowercase__ : int = 7
lowercase__ : int = 10
lowercase__ : float = 1E-8
lowercase__ : float = 1E5
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
return asdict(self )
def UpperCAmelCase__ ( ) -> List[Any]:
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 634 | 0 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ = "x" , lowercase__ = 10**-10 , lowercase__ = 1 , ) -> complex:
__lowercase = symbols(lowercase__ )
__lowercase = lambdify(lowercase__ , lowercase__ )
__lowercase = lambdify(lowercase__ , diff(lowercase__ , lowercase__ ) )
__lowercase = starting_point
while True:
if diff_function(lowercase__ ) != 0:
__lowercase = prev_guess - multiplicity * func(lowercase__ ) / diff_function(
lowercase__ )
else:
raise ZeroDivisionError("""Could not find root""" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
__lowercase = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}""")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
F"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
F"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 701 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : Tuple = LxmertTokenizer
lowercase__ : List[str] = LxmertTokenizerFast
lowercase__ : Optional[Any] = True
lowercase__ : List[Any] = True
def snake_case__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
super().setUp()
__lowercase = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def snake_case__ ( self : Optional[int] , lowercase : int ) -> List[Any]:
"""simple docstring"""
__lowercase = """UNwant\u00E9d,running"""
__lowercase = """unwanted, running"""
return input_text, output_text
def snake_case__ ( self : str ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class(self.vocab_file )
__lowercase = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(lowercase , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , [7, 4, 5, 10, 8, 9] )
def snake_case__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__lowercase = self.get_tokenizer()
__lowercase = self.get_rust_tokenizer()
__lowercase = """I was born in 92000, and this is falsé."""
__lowercase = tokenizer.tokenize(lowercase )
__lowercase = rust_tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
__lowercase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
__lowercase = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
__lowercase = self.get_rust_tokenizer()
__lowercase = tokenizer.encode(lowercase )
__lowercase = rust_tokenizer.encode(lowercase )
self.assertListEqual(lowercase , lowercase )
| 634 | 0 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
UpperCamelCase__ = logging.getLogger(__name__)
def UpperCAmelCase__ ( lowercase__ ):
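# Dump the repository path, head commit SHA and active branch to git_log.json in the
# given folder so the training run can be traced back to an exact code state.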
__lowercase = git.Repo(search_parent_directories=lowercase__ )
__lowercase = {
"""repo_id""": str(lowercase__ ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
}
with open(os.path.join(lowercase__ , """git_log.json""" ) , """w""" ) as f:
json.dump(lowercase__ , lowercase__ , indent=4 )
def UpperCAmelCase__ ( lowercase__ ):
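# Configure single-process or torch.distributed multi-GPU training from the launcher
# environment variables (WORLD_SIZE, N_GPU_NODE, RANK, ...), then pin the local CUDA
# device and initialise the NCCL process group when several GPUs are used.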
if params.n_gpu <= 0:
__lowercase = 0
__lowercase = -1
__lowercase = True
__lowercase = False
return
assert torch.cuda.is_available()
logger.info("""Initializing GPUs""" )
if params.n_gpu > 1:
assert params.local_rank != -1
__lowercase = int(os.environ["""WORLD_SIZE"""] )
__lowercase = int(os.environ["""N_GPU_NODE"""] )
__lowercase = int(os.environ["""RANK"""] )
# number of nodes / node ID
__lowercase = params.world_size // params.n_gpu_per_node
__lowercase = params.global_rank // params.n_gpu_per_node
__lowercase = True
assert params.n_nodes == int(os.environ["""N_NODES"""] )
assert params.node_id == int(os.environ["""NODE_RANK"""] )
# local job (single GPU)
else:
assert params.local_rank == -1
__lowercase = 1
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = 1
__lowercase = 1
__lowercase = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
__lowercase = params.node_id == 0 and params.local_rank == 0
__lowercase = params.n_nodes > 1
# summary
__lowercase = F"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes )
logger.info(PREFIX + """Node ID : %i""" % params.node_id )
logger.info(PREFIX + """Local rank : %i""" % params.local_rank )
logger.info(PREFIX + """World size : %i""" % params.world_size )
logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node )
logger.info(PREFIX + """Master : %s""" % str(params.is_master ) )
logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) )
logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) )
logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("""Initializing PyTorch distributed""" )
torch.distributed.init_process_group(
init_method="""env://""" , backend="""nccl""" , )
def UpperCAmelCase__ ( lowercase__ ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 702 |
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> bool:
__lowercase = len(lowercase__ )
__lowercase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
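# subset[i][j] is True when some subset of the first i elements sums exactly to j;
# e.g. for arr = [3, 34, 4, 12, 5, 2] and required_sum = 9 the result is True (3 + 4 + 2).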
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
__lowercase = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
__lowercase = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
__lowercase = subset[i - 1][j]
if arr[i - 1] <= j:
__lowercase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 634 | 0 |
import argparse
import os
import re
UpperCamelCase__ = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
UpperCamelCase__ = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
UpperCamelCase__ = re.compile(r"\s*\(\s*\"(\S[^\"]+)\"")
def UpperCAmelCase__ ( lowercase__ , lowercase__ = False ) -> int:
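# Re-order the entries of every *_MAPPING OrderedDict in the file alphabetically by
# model identifier; when overwrite is False, return True if the file would change.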
with open(lowercase__ , """r""" , encoding="""utf-8""" ) as f:
__lowercase = f.read()
__lowercase = content.split("""\n""" )
__lowercase = []
__lowercase = 0
while line_idx < len(lowercase__ ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
__lowercase = len(re.search(r"""^(\s*)\S""" , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(""" """ * indent + """(""" ):
new_lines.append(lines[line_idx] )
line_idx += 1
__lowercase = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
__lowercase = line_idx
while not lines[line_idx].startswith(""" """ * indent + """)""" ):
line_idx += 1
blocks.append("""\n""".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
__lowercase = sorted(lowercase__ , key=lambda lowercase__ : _re_identifier.search(lowercase__ ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(lowercase__ , """w""" , encoding="""utf-8""" ) as f:
f.write("""\n""".join(lowercase__ ) )
elif "\n".join(lowercase__ ) != content:
return True
def UpperCAmelCase__ ( lowercase__ = False ) -> List[str]:
__lowercase = [os.path.join(lowercase__ , lowercase__ ) for f in os.listdir(lowercase__ ) if f.endswith(""".py""" )]
__lowercase = [sort_auto_mapping(lowercase__ , overwrite=lowercase__ ) for fname in fnames]
if not overwrite and any(lowercase__ ):
__lowercase = [f for f, d in zip(lowercase__ , lowercase__ ) if d]
raise ValueError(
F"The following files have auto mappings that need sorting: {', '.join(lowercase__ )}. Run `make style` to fix"
""" this.""" )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
UpperCamelCase__ = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 703 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Union[str, Any] = """yolos"""
def __init__( self : Optional[int] , lowercase : Any=768 , lowercase : Tuple=12 , lowercase : Tuple=12 , lowercase : str=3_072 , lowercase : Optional[Any]="gelu" , lowercase : Union[str, Any]=0.0 , lowercase : Dict=0.0 , lowercase : Optional[int]=0.02 , lowercase : Optional[Any]=1E-1_2 , lowercase : Tuple=[512, 864] , lowercase : Optional[int]=16 , lowercase : Dict=3 , lowercase : Optional[Any]=True , lowercase : Optional[int]=100 , lowercase : Optional[int]=True , lowercase : Any=False , lowercase : Any=1 , lowercase : Any=5 , lowercase : List[str]=2 , lowercase : Union[str, Any]=5 , lowercase : str=2 , lowercase : Tuple=0.1 , **lowercase : str , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowercase )
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = qkv_bias
__lowercase = num_detection_tokens
__lowercase = use_mid_position_embeddings
__lowercase = auxiliary_loss
# Hungarian matcher
__lowercase = class_cost
__lowercase = bbox_cost
__lowercase = giou_cost
# Loss coefficients
__lowercase = bbox_loss_coefficient
__lowercase = giou_loss_coefficient
__lowercase = eos_coefficient
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Dict = version.parse("""1.11""" )
@property
def snake_case__ ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def snake_case__ ( self : int ) -> float:
"""simple docstring"""
return 1E-4
@property
def snake_case__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
return 12
| 634 | 0 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
UpperCamelCase__ = datasets.logging.get_logger(__name__)
UpperCamelCase__ = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
UpperCamelCase__ = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
UpperCamelCase__ = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__=False , lowercase__=False , lowercase__=True , lowercase__=False , lowercase__="dummy_doc" ) -> str:
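# Parse the key (gold) and system CoNLL lines, optionally dropping nested mentions and
# singleton clusters, and build the mention/cluster alignments consumed by the CoVal scorers.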
__lowercase = {doc: key_lines}
__lowercase = {doc: sys_lines}
__lowercase = {}
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase , __lowercase = reader.get_doc_mentions(lowercase__ , key_doc_lines[doc] , lowercase__ )
key_singletons_num += singletons_num
if NP_only or min_span:
__lowercase = reader.set_annotated_parse_trees(lowercase__ , key_doc_lines[doc] , lowercase__ , lowercase__ )
__lowercase , __lowercase = reader.get_doc_mentions(lowercase__ , sys_doc_lines[doc] , lowercase__ )
sys_singletons_num += singletons_num
if NP_only or min_span:
__lowercase = reader.set_annotated_parse_trees(lowercase__ , key_doc_lines[doc] , lowercase__ , lowercase__ )
if remove_nested:
__lowercase , __lowercase = reader.remove_nested_coref_mentions(lowercase__ , lowercase__ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
__lowercase , __lowercase = reader.remove_nested_coref_mentions(lowercase__ , lowercase__ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
__lowercase = reader.get_mention_assignments(lowercase__ , lowercase__ )
__lowercase = reader.get_mention_assignments(lowercase__ , lowercase__ )
__lowercase = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
logger.info(
"""Number of resulting singleton clusters in the key """
F"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
if not keep_singletons:
logger.info(
F"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
"""files, respectively""" )
return doc_coref_infos
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Optional[Any]:
__lowercase = get_coref_infos(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
__lowercase = {}
__lowercase = 0
__lowercase = 0
for name, metric in metrics:
__lowercase , __lowercase , __lowercase = evaluator.evaluate_documents(lowercase__ , lowercase__ , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F"{name}/recall": recall, F"{name}/precision": precision, F"{name}/f1": fa} )
logger.info(
name.ljust(10 ) , F"Recall: {recall * 100:.2f}" , F" Precision: {precision * 100:.2f}" , F" F1: {fa * 100:.2f}" , )
if conll_subparts_num == 3:
__lowercase = (conll / 3) * 100
logger.info(F"CoNLL score: {conll:.2f}" )
output_scores.update({"""conll_score""": conll} )
return output_scores
def UpperCAmelCase__ ( lowercase__ ) -> List[Any]:
__lowercase = False
for line in key_lines:
if not line.startswith("""#""" ):
if len(line.split() ) > 6:
__lowercase = line.split()[5]
if not parse_col == "-":
__lowercase = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def snake_case__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def snake_case__ ( self : Tuple , lowercase : Dict , lowercase : Optional[int] , lowercase : Dict=True , lowercase : List[str]=False , lowercase : int=False , lowercase : Dict=False ) -> str:
"""simple docstring"""
__lowercase = [
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
__lowercase = util.check_gold_parse_annotation(lowercase )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
__lowercase = evaluate(
key_lines=lowercase , sys_lines=lowercase , metrics=lowercase , NP_only=lowercase , remove_nested=lowercase , keep_singletons=lowercase , min_span=lowercase , )
return score
| 704 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowerCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : int = IFImgaImgSuperResolutionPipeline
lowercase__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""}
lowercase__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""} )
lowercase__ : Tuple = PipelineTesterMixin.required_optional_params - {"""latents"""}
def snake_case__ ( self : Tuple ) -> Any:
"""simple docstring"""
return self._get_superresolution_dummy_components()
def snake_case__ ( self : List[str] , lowercase : Optional[int] , lowercase : Optional[Any]=0 ) -> Union[str, Any]:
"""simple docstring"""
if str(lowercase ).startswith("""mps""" ):
__lowercase = torch.manual_seed(lowercase )
else:
__lowercase = torch.Generator(device=lowercase ).manual_seed(lowercase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase ) ).to(lowercase )
__lowercase = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowercase ) ).to(lowercase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def snake_case__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def snake_case__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def snake_case__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def snake_case__ ( self : Dict ) -> int:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def snake_case__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
self._test_save_load_local()
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 634 | 0 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Tuple , lowercase : list[tuple[float, float]] ) -> List[str]:
"""simple docstring"""
__lowercase = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
__lowercase = len(lowercase ) - 1
def snake_case__ ( self : Optional[Any] , lowercase : float ) -> list[float]:
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__lowercase = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , lowercase ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(lowercase ) , 5 ) == 1
return output_values
def snake_case__ ( self : List[str] , lowercase : float ) -> tuple[float, float]:
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__lowercase = self.basis_function(lowercase )
__lowercase = 0.0
__lowercase = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def snake_case__ ( self : Optional[Any] , lowercase : float = 0.01 ) -> Any:
"""simple docstring"""
from matplotlib import pyplot as plt # type: ignore
__lowercase = [] # x coordinates of points to plot
__lowercase = [] # y coordinates of points to plot
__lowercase = 0.0
while t <= 1:
__lowercase = self.bezier_curve_function(lowercase )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
__lowercase = [i[0] for i in self.list_of_points]
__lowercase = [i[1] for i in self.list_of_points]
plt.plot(
lowercase , lowercase , color="""blue""" , label="""Curve of Degree """ + str(self.degree ) , )
plt.scatter(lowercase , lowercase , color="""red""" , label="""Control Points""" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
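# A minimal, hedged illustration of the Bernstein-basis idea used by the class
# above, kept independent of it (bernstein/control_points are illustrative names):
# evaluating the degree-1 curve from __main__ at t = 0.5 lands on the segment midpoint.
from scipy.special import comb

def bernstein(i: int, n: int, t: float) -> float:
    # i-th Bernstein basis polynomial of degree n evaluated at t
    return comb(n, i) * ((1 - t) ** (n - i)) * (t**i)

control_points = [(1, 2), (3, 5)]  # the degree-1 example from __main__
t = 0.5
x = sum(bernstein(i, 1, t) * p[0] for i, p in enumerate(control_points))
y = sum(bernstein(i, 1, t) * p[1] for i, p in enumerate(control_points))
assert (x, y) == (2.0, 3.5)  # midpoint of the straight line between the control points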
| 705 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
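# Hedged sketch of the same deferred-import idea using a plain module-level
# __getattr__ (PEP 562) in a hypothetical my_package/__init__.py; this is not the
# _LazyModule implementation itself, and the module/symbol names are illustrative:
import importlib
from typing import TYPE_CHECKING

_import_structure = {"tokenization_luke": ["LukeTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_luke import LukeTokenizer
else:
    def __getattr__(name):
        # import the submodule only when one of its symbols is first requested
        for module_name, symbols in _import_structure.items():
            if name in symbols:
                module = importlib.import_module(f".{module_name}", __name__)
                return getattr(module, name)
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")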
| 634 | 0 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
UpperCamelCase__ = logging.getLogger(__name__)
torch.set_grad_enabled(False)
UpperCamelCase__ = "cuda" if torch.cuda.is_available() else "cpu"
def UpperCAmelCase__ ( lowercase__ , lowercase__=100 , lowercase__=" " ) -> List[str]:
__lowercase = text.split(lowercase__ )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(lowercase__ ) , lowercase__ )]
def UpperCAmelCase__ ( lowercase__ ) -> dict:
__lowercase , __lowercase = [], []
for title, text in zip(documents["""title"""] , documents["""text"""] ):
if text is not None:
for passage in split_text(lowercase__ ):
titles.append(title if title is not None else """""" )
texts.append(lowercase__ )
return {"title": titles, "text": texts}
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ ) -> dict:
__lowercase = ctx_tokenizer(
documents["""title"""] , documents["""text"""] , truncation=lowercase__ , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""]
__lowercase = ctx_encoder(input_ids.to(device=lowercase__ ) , return_dict=lowercase__ ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , ) -> List[str]:
######################################
logger.info("""Step 1 - Create the dataset""" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
__lowercase = load_dataset(
"""csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
__lowercase = dataset.map(lowercase__ , batched=lowercase__ , num_proc=processing_args.num_proc )
# And compute the embeddings
__lowercase = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=lowercase__ )
__lowercase = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
__lowercase = Features(
{"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space
__lowercase = dataset.map(
partial(lowercase__ , ctx_encoder=lowercase__ , ctx_tokenizer=lowercase__ ) , batched=lowercase__ , batch_size=processing_args.batch_size , features=lowercase__ , )
# And finally save your dataset
__lowercase = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" )
dataset.save_to_disk(lowercase__ )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("""Step 2 - Index the dataset""" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
__lowercase = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("""embeddings""" , custom_index=lowercase__ )
# And save the index
__lowercase = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" )
dataset.get_index("""embeddings""" ).save(lowercase__ )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : str = field(
default=str(Path(_UpperCAmelCase ).parent / """test_run""" / """dummy-kb""" / """my_knowledge_dataset.csv""" ) , metadata={"""help""": """Path to a tab-separated csv file with columns 'title' and 'text'"""} , )
lowercase__ : Optional[str] = field(
default=_UpperCAmelCase , metadata={"""help""": """Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."""} , )
lowercase__ : str = field(
default="""facebook/rag-sequence-nq""" , metadata={"""help""": """The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"""} , )
lowercase__ : str = field(
default="""facebook/dpr-ctx_encoder-multiset-base""" , metadata={
"""help""": (
"""The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"""
""" 'facebook/dpr-ctx_encoder-multiset-base'"""
)
} , )
lowercase__ : Optional[str] = field(
default=str(Path(_UpperCAmelCase ).parent / """test_run""" / """dummy-kb""" ) , metadata={"""help""": """Path to a directory where the dataset passages and the index will be saved"""} , )
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : Optional[int] = field(
default=_UpperCAmelCase , metadata={
"""help""": """The number of processes to use to split the documents into passages. Default is single process."""
} , )
lowercase__ : int = field(
default=16 , metadata={
"""help""": """The batch size to use when computing the passages embeddings using the DPR context encoder."""
} , )
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : int = field(
default=768 , metadata={"""help""": """The dimension of the embeddings to pass to the HNSW Faiss index."""} , )
lowercase__ : int = field(
default=128 , metadata={
"""help""": (
"""The number of bi-directional links created for every new element during the HNSW index construction."""
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
UpperCamelCase__ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
UpperCamelCase__ = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
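# Hedged sketch of how the saved passages and FAISS index could be queried afterwards.
# The question-encoder classes and checkpoint are standard transformers/datasets names,
# but the paths and k below are illustrative assumptions, not part of the script above:
from datasets import load_from_disk
from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast

dataset = load_from_disk("<output_dir>/my_knowledge_dataset")
dataset.load_faiss_index("embeddings", "<output_dir>/my_knowledge_dataset_hnsw_index.faiss")

q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

question = "What does Moses' rod turn into ?"
question_emb = q_encoder(**q_tokenizer(question, return_tensors="pt"))[0][0].detach().numpy()
scores, retrieved = dataset.get_nearest_examples("embeddings", question_emb, k=5)
print(retrieved["title"])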
| 706 |
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> int:
if len(lowercase__ ) != len(lowercase__ ):
raise ValueError("""String lengths must match!""" )
__lowercase = 0
    for chara, charb in zip(lowercase__ , lowercase__ ):
        if chara != charb:
            count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
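# A small, self-contained sketch of the same Hamming-distance idea with clearer
# names (hamming_distance is an illustrative name, not taken from the code above):
def hamming_distance(a: str, b: str) -> int:
    if len(a) != len(b):
        raise ValueError("String lengths must match!")
    return sum(x != y for x, y in zip(a, b))

assert hamming_distance("karolin", "kathrin") == 3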
| 634 | 0 |
UpperCamelCase__ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def UpperCAmelCase__ ( lowercase__ ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(lowercase__ , lowercase__ ):
__lowercase = F"a bytes-like object is required, not '{data.__class__.__name__}'"
raise TypeError(lowercase__ )
__lowercase = """""".join(bin(lowercase__ )[2:].zfill(8 ) for byte in data )
__lowercase = len(lowercase__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
__lowercase = B"""=""" * ((6 - len(lowercase__ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(lowercase__ ) % 6)
else:
__lowercase = B""""""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(lowercase__ ) , 6 ) ).encode()
+ padding
)
def UpperCAmelCase__ ( lowercase__ ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(lowercase__ , lowercase__ ) and not isinstance(lowercase__ , lowercase__ ):
__lowercase = (
"""argument should be a bytes-like object or ASCII string, """
F"not '{encoded_data.__class__.__name__}'"
)
raise TypeError(lowercase__ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(lowercase__ , lowercase__ ):
try:
__lowercase = encoded_data.decode("""utf-8""" )
except UnicodeDecodeError:
raise ValueError("""base64 encoded data should only contain ASCII characters""" )
__lowercase = encoded_data.count("""=""" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(lowercase__ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
__lowercase = encoded_data[:-padding]
__lowercase = """""".join(
bin(B64_CHARSET.index(lowercase__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
__lowercase = """""".join(
bin(B64_CHARSET.index(lowercase__ ) )[2:].zfill(6 ) for char in encoded_data )
__lowercase = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(lowercase__ ) , 8 )
]
return bytes(lowercase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
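# Hedged round-trip check of the encoding scheme implemented above, using the
# standard library as the reference implementation:
import base64

data = b"Hello, World!"
encoded = base64.b64encode(data)  # b'SGVsbG8sIFdvcmxkIQ=='
assert base64.b64decode(encoded) == data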
| 707 |
from __future__ import annotations
from collections.abc import Callable
UpperCamelCase__ = list[list[float | int]]
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> Matrix:
__lowercase = len(lowercase__ )
__lowercase = [[0 for _ in range(size + 1 )] for _ in range(lowercase__ )]
__lowercase = 42
__lowercase = 42
__lowercase = 42
__lowercase = 42
__lowercase = 42
__lowercase = 42
for row in range(lowercase__ ):
for col in range(lowercase__ ):
__lowercase = matrix[row][col]
__lowercase = vector[row][0]
__lowercase = 0
__lowercase = 0
while row < size and col < size:
# pivoting
__lowercase = max((abs(augmented[rowa][col] ), rowa) for rowa in range(lowercase__ , lowercase__ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
__lowercase , __lowercase = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , lowercase__ ):
__lowercase = augmented[rowa][col] / augmented[row][col]
__lowercase = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , lowercase__ ):
for row in range(lowercase__ ):
__lowercase = augmented[row][col] / augmented[col][col]
for cola in range(lowercase__ , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(lowercase__ )
]
def UpperCAmelCase__ ( lowercase__ ) -> Callable[[int], int]:
__lowercase = len(lowercase__ )
__lowercase = [[0 for _ in range(lowercase__ )] for _ in range(lowercase__ )]
__lowercase = [[0] for _ in range(lowercase__ )]
__lowercase = 42
__lowercase = 42
__lowercase = 42
__lowercase = 42
for x_val, y_val in enumerate(lowercase__ ):
for col in range(lowercase__ ):
__lowercase = (x_val + 1) ** (size - col - 1)
__lowercase = y_val
__lowercase = solve(lowercase__ , lowercase__ )
def interpolated_func(lowercase__ ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(lowercase__ ) )
return interpolated_func
def UpperCAmelCase__ ( lowercase__ ) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def UpperCAmelCase__ ( lowercase__ = question_function , lowercase__ = 10 ) -> int:
__lowercase = [func(lowercase__ ) for x_val in range(1 , order + 1 )]
__lowercase = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
__lowercase = 0
__lowercase = 42
__lowercase = 42
for poly in polynomials:
__lowercase = 1
while func(lowercase__ ) == poly(lowercase__ ):
x_val += 1
ret += poly(lowercase__ )
return ret
if __name__ == "__main__":
print(F"""{solution() = }""")
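# Hedged sketch of the "first incorrect term" (FIT) idea on the cubic example
# u(n) = n^3 from the Project Euler 101 statement, using numpy's polynomial fit
# instead of the Gaussian-elimination solver above (the FITs sum to the documented 74):
import numpy as np

def u(n):
    return n**3

fits = 0
for k in range(1, 4):  # OP(1), OP(2), OP(3)
    xs = np.arange(1, k + 1)
    coeffs = np.polyfit(xs, [u(x) for x in xs], k - 1)  # degree k-1 through the first k terms
    n = k + 1
    while int(round(np.polyval(coeffs, n))) == u(n):
        n += 1
    fits += int(round(np.polyval(coeffs, n)))
assert fits == 1 + 15 + 58 == 74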
| 634 | 0 |
import random
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> tuple:
__lowercase , __lowercase , __lowercase = [], [], []
for element in data:
if element < pivot:
less.append(lowercase__ )
elif element > pivot:
greater.append(lowercase__ )
else:
equal.append(lowercase__ )
return less, equal, greater
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> Dict:
# index = len(items) // 2 when trying to find the median
# (value of index when items is sorted)
# invalid input
if index >= len(lowercase__ ) or index < 0:
return None
__lowercase = items[random.randint(0 , len(lowercase__ ) - 1 )]
__lowercase = 0
__lowercase , __lowercase , __lowercase = _partition(lowercase__ , lowercase__ )
__lowercase = len(lowercase__ )
__lowercase = len(lowercase__ )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(lowercase__ , lowercase__ )
# must be in larger
else:
return quick_select(lowercase__ , index - (m + count) )
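# Hedged illustration of what quick_select computes (the k-th smallest element,
# 0-indexed), cross-checked against sorting and heapq; the data here is illustrative:
import heapq

data = [7, 1, 5, 3, 9, 2]
k = len(data) // 2              # index 3, i.e. the upper median
kth_smallest = sorted(data)[k]  # 5
assert heapq.nsmallest(k + 1, data)[-1] == kth_smallest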
| 708 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase__ = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 634 | 0 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , lowercase : List[Any] , lowercase : List[Any]=13 , lowercase : Tuple=10 , lowercase : List[str]=3 , lowercase : Dict=2 , lowercase : Any=2 , lowercase : Optional[int]=True , lowercase : Optional[int]=True , lowercase : Union[str, Any]=32 , lowercase : Any=5 , lowercase : Optional[Any]=4 , lowercase : Union[str, Any]=37 , lowercase : int="gelu" , lowercase : Optional[int]=0.1 , lowercase : int=0.1 , lowercase : List[str]=10 , lowercase : int=0.02 , lowercase : int="divided_space_time" , lowercase : int=None , ) -> Tuple:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = num_channels
__lowercase = patch_size
__lowercase = num_frames
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = attention_type
__lowercase = initializer_range
__lowercase = scope
__lowercase = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
__lowercase = (image_size // patch_size) ** 2
__lowercase = (num_frames) * self.num_patches_per_frame + 1
def snake_case__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.num_labels )
__lowercase = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
__lowercase = self.num_labels
return config
def snake_case__ ( self : Dict , lowercase : str , lowercase : Tuple , lowercase : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = TimesformerModel(config=lowercase )
model.to(lowercase )
model.eval()
__lowercase = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self : str , lowercase : List[str] , lowercase : List[Any] , lowercase : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = TimesformerForVideoClassification(lowercase )
model.to(lowercase )
model.eval()
__lowercase = model(lowercase )
# verify the logits shape
__lowercase = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , lowercase )
def snake_case__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : str = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
lowercase__ : Any = (
{"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
lowercase__ : List[str] = False
lowercase__ : Any = False
lowercase__ : Union[str, Any] = False
lowercase__ : Optional[int] = False
def snake_case__ ( self : Dict ) -> Dict:
"""simple docstring"""
__lowercase = TimesformerModelTester(self )
__lowercase = ConfigTester(
self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37 )
def snake_case__ ( self : Optional[int] , lowercase : Union[str, Any] , lowercase : Union[str, Any] , lowercase : Tuple=False ) -> Optional[Any]:
"""simple docstring"""
__lowercase = copy.deepcopy(lowercase )
if return_labels:
if model_class in get_values(lowercase ):
__lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
return inputs_dict
def snake_case__ ( self : List[Any] ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
def snake_case__ ( self : List[str] ) -> Any:
"""simple docstring"""
pass
def snake_case__ ( self : int ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) )
def snake_case__ ( self : str ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase )
def snake_case__ ( self : Tuple ) -> Dict:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def snake_case__ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*lowercase )
@slow
def snake_case__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = TimesformerModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def snake_case__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
if not self.has_attentions:
pass
else:
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
for model_class in self.all_model_classes:
__lowercase = self.model_tester.seq_length
__lowercase = self.model_tester.num_frames
__lowercase = True
__lowercase = False
__lowercase = True
__lowercase = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase , lowercase ) )
__lowercase = outputs.attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowercase = True
__lowercase = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase , lowercase ) )
__lowercase = outputs.attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
__lowercase = len(lowercase )
# Check attention is always last and order is fine
__lowercase = True
__lowercase = True
__lowercase = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(out_len + 1 , len(lowercase ) )
__lowercase = outputs.attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def snake_case__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
def check_hidden_states_output(lowercase : List[Any] , lowercase : Dict , lowercase : Union[str, Any] ):
__lowercase = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase , lowercase ) )
__lowercase = outputs.hidden_states
__lowercase = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowercase ) , lowercase )
__lowercase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(lowercase , lowercase , lowercase )
def UpperCAmelCase__ ( ) -> List[Any]:
__lowercase = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
__lowercase = np.load(lowercase__ )
return list(lowercase__ )
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def snake_case__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowercase = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
lowercase )
__lowercase = self.default_image_processor
__lowercase = prepare_video()
__lowercase = image_processor(video[:8] , return_tensors="""pt""" ).to(lowercase )
# forward pass
with torch.no_grad():
__lowercase = model(**lowercase )
# verify the logits
__lowercase = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , lowercase )
__lowercase = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1E-4 ) )
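# Quick check of the sequence-length arithmetic used by the tester above
# (with its defaults image_size=10, patch_size=2, num_frames=2):
num_patches_per_frame = (10 // 2) ** 2       # 25 spatial patches per frame
seq_length = 2 * num_patches_per_frame + 1   # 51 tokens including the CLS token
assert seq_length == 51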
| 709 |
import unittest
import numpy as np
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , ) -> np.ndarray:
__lowercase = np.shape(lowercase__ )
__lowercase = np.shape(lowercase__ )
__lowercase = np.shape(lowercase__ )
if shape_a[0] != shape_b[0]:
__lowercase = (
"""Expected the same number of rows for A and B. """
F"Instead found A of size {shape_a} and B of size {shape_b}"
)
raise ValueError(lowercase__ )
if shape_b[1] != shape_c[1]:
__lowercase = (
"""Expected the same number of columns for B and C. """
F"Instead found B of size {shape_b} and C of size {shape_c}"
)
raise ValueError(lowercase__ )
__lowercase = pseudo_inv
if a_inv is None:
try:
__lowercase = np.linalg.inv(lowercase__ )
except np.linalg.LinAlgError:
raise ValueError(
"""Input matrix A is not invertible. Cannot compute Schur complement.""" )
return mat_c - mat_b.T @ a_inv @ mat_b
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : Dict ) -> None:
"""simple docstring"""
__lowercase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
__lowercase = np.array([[0, 3], [3, 0], [2, 3]] )
__lowercase = np.array([[2, 1], [6, 3]] )
__lowercase = schur_complement(lowercase , lowercase , lowercase )
__lowercase = np.block([[a, b], [b.T, c]] )
__lowercase = np.linalg.det(lowercase )
__lowercase = np.linalg.det(lowercase )
__lowercase = np.linalg.det(lowercase )
self.assertAlmostEqual(lowercase , det_a * det_s )
def snake_case__ ( self : Tuple ) -> None:
"""simple docstring"""
__lowercase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
__lowercase = np.array([[0, 3], [3, 0], [2, 3]] )
__lowercase = np.array([[2, 1], [6, 3]] )
with self.assertRaises(lowercase ):
schur_complement(lowercase , lowercase , lowercase )
def snake_case__ ( self : Tuple ) -> None:
"""simple docstring"""
__lowercase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
__lowercase = np.array([[0, 3], [3, 0], [2, 3]] )
__lowercase = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(lowercase ):
schur_complement(lowercase , lowercase , lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
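# The function above computes the Schur complement S = C - B^T A^{-1} B of the
# block matrix [[A, B], [B^T, C]]; the first unit test relies on the standard
# identity det([[A, B], [B^T, C]]) = det(A) * det(S), checked here on the same data:
import numpy as np

a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
b = np.array([[0, 3], [3, 0], [2, 3]])
c = np.array([[2, 1], [6, 3]])
s = c - b.T @ np.linalg.inv(a) @ b
m = np.block([[a, b], [b.T, c]])
assert np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))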
| 634 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase )
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : str = field(default="""summarization""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
lowercase__ : ClassVar[Features] = Features({"""text""": Value("""string""" )} )
lowercase__ : ClassVar[Features] = Features({"""summary""": Value("""string""" )} )
lowercase__ : str = "text"
lowercase__ : str = "summary"
@property
def snake_case__ ( self : List[Any] ) -> Dict[str, str]:
"""simple docstring"""
return {self.text_column: "text", self.summary_column: "summary"}
| 710 |
import random
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ = False ) -> dict:
__lowercase = {i: [] for i in range(lowercase__ )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(lowercase__ )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
    # for each pair of nodes, add an edge from i to j
    # if the randomly generated number is lower than probability
for i in range(lowercase__ ):
for j in range(i + 1 , lowercase__ ):
if random.random() < probability:
graph[i].append(lowercase__ )
if not directed:
                    # if the graph is undirected, also add the reverse edge from j to i
graph[j].append(lowercase__ )
return graph
def UpperCAmelCase__ ( lowercase__ ) -> dict:
return {
i: [j for j in range(lowercase__ ) if i != j] for i in range(lowercase__ )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
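# Quick illustration: with probability >= 1 the generator above falls back to a
# complete graph; for 3 vertices that adjacency list is simply every other node:
expected = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
complete = {i: [j for j in range(3) if i != j] for i in range(3)}
assert complete == expected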
| 634 | 0 |
from typing import Any
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) -> list:
_validation(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , )
# Creates data structures and fill initial step
__lowercase = {}
__lowercase = {}
for state in states_space:
__lowercase = observations_space[0]
__lowercase = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
__lowercase = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(lowercase__ ) ):
__lowercase = observations_space[o]
__lowercase = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
__lowercase = """"""
__lowercase = -1
for k_state in states_space:
__lowercase = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
__lowercase = probability
__lowercase = k_state
# Update probabilities and pointers dicts
__lowercase = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
__lowercase = arg_max
# The final observation
__lowercase = observations_space[len(lowercase__ ) - 1]
# argmax for given final observation
__lowercase = """"""
__lowercase = -1
for k_state in states_space:
__lowercase = probabilities[(k_state, final_observation)]
if probability > max_probability:
__lowercase = probability
__lowercase = k_state
__lowercase = arg_max
# Process pointers backwards
__lowercase = last_state
__lowercase = []
for o in range(len(lowercase__ ) - 1 , -1 , -1 ):
result.append(lowercase__ )
__lowercase = pointers[previous, observations_space[o]]
result.reverse()
return result
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) -> None:
_validate_not_empty(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , )
_validate_lists(lowercase__ , lowercase__ )
_validate_dicts(
lowercase__ , lowercase__ , lowercase__ )
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) -> None:
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError("""There's an empty parameter""" )
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> None:
_validate_list(lowercase__ , """observations_space""" )
_validate_list(lowercase__ , """states_space""" )
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> None:
if not isinstance(_object , lowercase__ ):
__lowercase = F"{var_name} must be a list"
raise ValueError(lowercase__ )
else:
for x in _object:
if not isinstance(lowercase__ , lowercase__ ):
__lowercase = F"{var_name} must be a list of strings"
raise ValueError(lowercase__ )
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , ) -> None:
_validate_dict(lowercase__ , """initial_probabilities""" , lowercase__ )
_validate_nested_dict(lowercase__ , """transition_probabilities""" )
_validate_nested_dict(lowercase__ , """emission_probabilities""" )
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> None:
_validate_dict(_object , lowercase__ , lowercase__ )
for x in _object.values():
_validate_dict(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ = False ) -> None:
if not isinstance(_object , lowercase__ ):
__lowercase = F"{var_name} must be a dict"
raise ValueError(lowercase__ )
if not all(isinstance(lowercase__ , lowercase__ ) for x in _object ):
__lowercase = F"{var_name} all keys must be strings"
raise ValueError(lowercase__ )
if not all(isinstance(lowercase__ , lowercase__ ) for x in _object.values() ):
__lowercase = """nested dictionary """ if nested else """"""
__lowercase = F"{var_name} {nested_text}all values must be {value_type.__name__}"
raise ValueError(lowercase__ )
if __name__ == "__main__":
from doctest import testmod
testmod()
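# Hedged sketch of the classic "Healthy/Fever" HMM this Viterbi decoder is meant
# for, with a brute-force check of the most likely path (all names and values here
# are the textbook example, not taken from the code above):
from itertools import product

observations = ["normal", "cold", "dizzy"]
states = ["Healthy", "Fever"]
start_p = {"Healthy": 0.6, "Fever": 0.4}
trans_p = {
    "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    "Fever": {"Healthy": 0.4, "Fever": 0.6},
}
emit_p = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}

def path_prob(path):
    # joint probability of a state path and the observation sequence
    p = start_p[path[0]] * emit_p[path[0]][observations[0]]
    for prev, cur, obs in zip(path, path[1:], observations[1:]):
        p *= trans_p[prev][cur] * emit_p[cur][obs]
    return p

best = max(product(states, repeat=len(observations)), key=path_prob)
assert list(best) == ["Healthy", "Healthy", "Fever"]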
| 711 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
UpperCamelCase__ = random.Random()
def UpperCAmelCase__ ( lowercase__ , lowercase__=1.0 , lowercase__=None , lowercase__=None ) -> str:
if rng is None:
__lowercase = global_rng
__lowercase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[Any] , lowercase : Tuple , lowercase : Union[str, Any]=7 , lowercase : List[Any]=400 , lowercase : Any=2_000 , lowercase : Optional[int]=24 , lowercase : Any=24 , lowercase : List[str]=0.0 , lowercase : Dict=16_000 , lowercase : Union[str, Any]=True , lowercase : Dict=True , ) -> Any:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = min_seq_length
__lowercase = max_seq_length
__lowercase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__lowercase = feature_size
__lowercase = num_mel_bins
__lowercase = padding_value
__lowercase = sampling_rate
__lowercase = return_attention_mask
__lowercase = do_normalize
def snake_case__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case__ ( self : List[str] , lowercase : Tuple=False , lowercase : int=False ) -> Optional[Any]:
"""simple docstring"""
def _flatten(lowercase : Optional[Any] ):
return list(itertools.chain(*lowercase ) )
if equal_length:
__lowercase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__lowercase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__lowercase = [np.asarray(lowercase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _lowerCAmelCase ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : int = SpeechaTextFeatureExtractor if is_speech_available() else None
def snake_case__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase = SpeechaTextFeatureExtractionTester(self )
def snake_case__ ( self : Tuple , lowercase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
self.assertTrue(np.all(np.mean(lowercase , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowercase , axis=0 ) - 1 ) < 1E-3 ) )
def snake_case__ ( self : List[Any] ) -> str:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = [np.asarray(lowercase ) for speech_input in speech_inputs]
# Test feature size
__lowercase = feature_extractor(lowercase , padding=lowercase , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
__lowercase = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
__lowercase = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-3 ) )
# Test batched
__lowercase = feature_extractor(lowercase , return_tensors="""np""" ).input_features
__lowercase = feature_extractor(lowercase , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(lowercase , lowercase ):
self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__lowercase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__lowercase = np.asarray(lowercase )
__lowercase = feature_extractor(lowercase , return_tensors="""np""" ).input_features
__lowercase = feature_extractor(lowercase , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(lowercase , lowercase ):
self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-3 ) )
def snake_case__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = ["""longest""", """max_length""", """do_not_pad"""]
__lowercase = [None, 16, None]
for max_length, padding in zip(lowercase , lowercase ):
__lowercase = feature_extractor(
lowercase , padding=lowercase , max_length=lowercase , return_attention_mask=lowercase )
__lowercase = inputs.input_features
__lowercase = inputs.attention_mask
__lowercase = [np.sum(lowercase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def snake_case__ ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = ["""longest""", """max_length""", """do_not_pad"""]
__lowercase = [None, 16, None]
for max_length, padding in zip(lowercase , lowercase ):
__lowercase = feature_extractor(
lowercase , max_length=lowercase , padding=lowercase , return_tensors="""np""" , return_attention_mask=lowercase )
__lowercase = inputs.input_features
__lowercase = inputs.attention_mask
__lowercase = [np.sum(lowercase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def snake_case__ ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = feature_extractor(
lowercase , padding="""max_length""" , max_length=4 , truncation=lowercase , return_tensors="""np""" , return_attention_mask=lowercase , )
__lowercase = inputs.input_features
__lowercase = inputs.attention_mask
__lowercase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = feature_extractor(
lowercase , padding="""longest""" , max_length=4 , truncation=lowercase , return_tensors="""np""" , return_attention_mask=lowercase , )
__lowercase = inputs.input_features
__lowercase = inputs.attention_mask
__lowercase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = feature_extractor(
lowercase , padding="""longest""" , max_length=16 , truncation=lowercase , return_tensors="""np""" , return_attention_mask=lowercase , )
__lowercase = inputs.input_features
__lowercase = inputs.attention_mask
__lowercase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
def snake_case__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
import torch
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = np.random.rand(100 , 32 ).astype(np.floataa )
__lowercase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__lowercase = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__lowercase = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def snake_case__ ( self : Optional[int] , lowercase : Union[str, Any] ) -> int:
"""simple docstring"""
from datasets import load_dataset
__lowercase = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
__lowercase = ds.sort("""id""" ).select(range(lowercase ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def snake_case__ ( self : str ) -> Any:
"""simple docstring"""
__lowercase = np.array([
-1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
-1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
-1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
] )
# fmt: on
__lowercase = self._load_datasamples(1 )
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = feature_extractor(lowercase , return_tensors="""pt""" ).input_features
self.assertEquals(input_features.shape , (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] , lowercase , atol=1E-4 ) )
| 634 | 0 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
UpperCamelCase__ = logging.get_logger(__name__)
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
def __init__( self : List[Any] , *lowercase : Any , **lowercase : Any ) -> None:
"""simple docstring"""
warnings.warn(
"""The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DPTImageProcessor instead.""" , lowercase , )
super().__init__(*lowercase , **lowercase )
| 712 |
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ ) -> float:
__lowercase = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def UpperCAmelCase__ ( ) -> List[str]:
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
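# Quick check of the closed form n/2 * (2a + (n - 1)d) with a=1, d=1, n=10:
# 5 * 11 = 55, which matches summing 1..10 directly.
assert (10 / 2) * (2 * 1 + (10 - 1) * 1) == sum(range(1, 11)) == 55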
| 634 | 0 |
def UpperCAmelCase__ ( lowercase__ ) -> list:
if len(lowercase__ ) <= 1:
return [tuple(lowercase__ )]
__lowercase = []
def generate(lowercase__ , lowercase__ ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , lowercase__ )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
else: # k is odd
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
generate(k - 1 , lowercase__ )
generate(len(lowercase__ ) , lowercase__ )
return res
if __name__ == "__main__":
UpperCamelCase__ = input("Enter numbers separated by a comma:\n").strip()
UpperCamelCase__ = [int(item) for item in user_input.split(",")]
print(heaps(arr))
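# Sanity check of the expected output size: Heap's algorithm enumerates all n!
# permutations, so for 3 elements it should yield the same 6 tuples itertools gives:
from itertools import permutations

assert set(permutations([1, 2, 3])) == {
    (1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1), (3, 1, 2), (3, 2, 1)
}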
| 713 |
def UpperCAmelCase__ ( lowercase__ = 100 ) -> int:
__lowercase = n * (n + 1) * (2 * n + 1) / 6
__lowercase = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F"""{solution() = }""")
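# Quick check with n = 10, matching the Project Euler 6 example:
# square of sum = 55**2 = 3025, sum of squares = 385, difference = 2640.
assert sum(range(1, 11)) ** 2 - sum(i * i for i in range(1, 11)) == 2640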
| 634 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {"vocab_file": "spiece.model"}
UpperCamelCase__ = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Tuple , lowercase : int , lowercase : Dict=False , lowercase : int=True , lowercase : Optional[Any]=False , lowercase : Tuple="<s>" , lowercase : Tuple="</s>" , lowercase : Union[str, Any]="<unk>" , lowercase : str="<sep>" , lowercase : int="<pad>" , lowercase : Tuple="<cls>" , lowercase : Optional[int]="<mask>" , lowercase : Optional[Any]=["<eop>", "<eod>"] , lowercase : Optional[Dict[str, Any]] = None , **lowercase : Optional[Any] , ) -> None:
"""simple docstring"""
__lowercase = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else mask_token
__lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowercase , remove_space=lowercase , keep_accents=lowercase , bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , additional_special_tokens=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , )
__lowercase = 3
__lowercase = do_lower_case
__lowercase = remove_space
__lowercase = keep_accents
__lowercase = vocab_file
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
__lowercase = jieba
__lowercase = str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def snake_case__ ( self : Any ) -> Tuple:
"""simple docstring"""
return len(self.sp_model )
def snake_case__ ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Any ) -> str:
"""simple docstring"""
__lowercase = self.__dict__.copy()
__lowercase = None
return state
def __setstate__( self : List[str] , lowercase : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__lowercase = {}
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case__ ( self : str , lowercase : List[Any] ) -> List[Any]:
"""simple docstring"""
if self.remove_space:
__lowercase = """ """.join(inputs.strip().split() )
else:
__lowercase = inputs
__lowercase = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
__lowercase = unicodedata.normalize("""NFKD""" , lowercase )
__lowercase = """""".join([c for c in outputs if not unicodedata.combining(lowercase )] )
if self.do_lower_case:
__lowercase = outputs.lower()
return outputs
def snake_case__ ( self : List[Any] , lowercase : str ) -> List[str]:
"""simple docstring"""
__lowercase = self.preprocess_text(lowercase )
__lowercase = self.sp_model.encode(lowercase , out_type=lowercase )
__lowercase = []
for piece in pieces:
if len(lowercase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
__lowercase = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowercase , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__lowercase = cur_pieces[1:]
else:
__lowercase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(lowercase )
else:
new_pieces.append(lowercase )
return new_pieces
def snake_case__ ( self : Any , lowercase : Dict ) -> int:
"""simple docstring"""
return self.sp_model.PieceToId(lowercase )
def snake_case__ ( self : str , lowercase : int ) -> Union[str, Any]:
"""simple docstring"""
return self.sp_model.IdToPiece(lowercase )
def snake_case__ ( self : Optional[int] , lowercase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = """""".join(lowercase ).replace(lowercase , """ """ ).strip()
return out_string
def snake_case__ ( self : List[Any] , lowercase : List[int] , lowercase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def snake_case__ ( self : Optional[int] , lowercase : List[int] , lowercase : Optional[List[int]] = None , lowercase : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase )
if token_ids_a is not None:
return ([0] * len(lowercase )) + [1] + ([0] * len(lowercase )) + [1, 1]
return ([0] * len(lowercase )) + [1, 1]
def snake_case__ ( self : int , lowercase : List[int] , lowercase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowercase = [self.sep_token_id]
__lowercase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def snake_case__ ( self : Optional[int] , lowercase : str , lowercase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowercase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__lowercase = os.path.join(
lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase , """wb""" ) as fi:
__lowercase = self.sp_model.serialized_model_proto()
fi.write(lowercase )
return (out_vocab_file,)
def snake_case__ ( self : Optional[int] , *lowercase : List[str] , **lowercase : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase = super()._decode(*lowercase , **lowercase )
__lowercase = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text
| 714 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
UpperCamelCase__ = datasets.logging.get_logger(__name__)
UpperCamelCase__ = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
UpperCamelCase__ = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
UpperCamelCase__ = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__=False , lowercase__=False , lowercase__=True , lowercase__=False , lowercase__="dummy_doc" ) -> str:
__lowercase = {doc: key_lines}
__lowercase = {doc: sys_lines}
__lowercase = {}
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase , __lowercase = reader.get_doc_mentions(lowercase__ , key_doc_lines[doc] , lowercase__ )
key_singletons_num += singletons_num
if NP_only or min_span:
__lowercase = reader.set_annotated_parse_trees(lowercase__ , key_doc_lines[doc] , lowercase__ , lowercase__ )
__lowercase , __lowercase = reader.get_doc_mentions(lowercase__ , sys_doc_lines[doc] , lowercase__ )
sys_singletons_num += singletons_num
if NP_only or min_span:
__lowercase = reader.set_annotated_parse_trees(lowercase__ , key_doc_lines[doc] , lowercase__ , lowercase__ )
if remove_nested:
__lowercase , __lowercase = reader.remove_nested_coref_mentions(lowercase__ , lowercase__ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
__lowercase , __lowercase = reader.remove_nested_coref_mentions(lowercase__ , lowercase__ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
__lowercase = reader.get_mention_assignments(lowercase__ , lowercase__ )
__lowercase = reader.get_mention_assignments(lowercase__ , lowercase__ )
__lowercase = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
logger.info(
"""Number of resulting singleton clusters in the key """
F"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
if not keep_singletons:
logger.info(
F"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
"""files, respectively""" )
return doc_coref_infos
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Optional[Any]:
__lowercase = get_coref_infos(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
__lowercase = {}
__lowercase = 0
__lowercase = 0
for name, metric in metrics:
__lowercase , __lowercase , __lowercase = evaluator.evaluate_documents(lowercase__ , lowercase__ , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F"{name}/recall": recall, F"{name}/precision": precision, F"{name}/f1": fa} )
logger.info(
name.ljust(10 ) , F"Recall: {recall * 100:.2f}" , F" Precision: {precision * 100:.2f}" , F" F1: {fa * 100:.2f}" , )
if conll_subparts_num == 3:
__lowercase = (conll / 3) * 100
logger.info(F"CoNLL score: {conll:.2f}" )
output_scores.update({"""conll_score""": conll} )
return output_scores
def UpperCAmelCase__ ( lowercase__ ) -> List[Any]:
__lowercase = False
for line in key_lines:
if not line.startswith("""#""" ):
if len(line.split() ) > 6:
__lowercase = line.split()[5]
if not parse_col == "-":
__lowercase = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def snake_case__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def snake_case__ ( self : Tuple , lowercase : Dict , lowercase : Optional[int] , lowercase : Dict=True , lowercase : List[str]=False , lowercase : int=False , lowercase : Dict=False ) -> str:
"""simple docstring"""
__lowercase = [
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
__lowercase = util.check_gold_parse_annotation(lowercase )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
__lowercase = evaluate(
key_lines=lowercase , sys_lines=lowercase , metrics=lowercase , NP_only=lowercase , remove_nested=lowercase , keep_singletons=lowercase , min_span=lowercase , )
return score
| 634 | 0 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
def snake_case__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowercase , """tf_padding""" ) )
self.parent.assertTrue(hasattr(lowercase , """depth_multiplier""" ) )
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , lowercase : Optional[int] , lowercase : Union[str, Any]=13 , lowercase : Any=3 , lowercase : Any=32 , lowercase : int=0.25 , lowercase : Dict=8 , lowercase : Dict=True , lowercase : Dict=1_024 , lowercase : Dict=32 , lowercase : Tuple="relu6" , lowercase : Optional[Any]=0.1 , lowercase : Any=0.02 , lowercase : str=True , lowercase : List[Any]=True , lowercase : List[str]=10 , lowercase : List[Any]=None , ) -> List[Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = num_channels
__lowercase = image_size
__lowercase = depth_multiplier
__lowercase = min_depth
__lowercase = tf_padding
__lowercase = int(last_hidden_size * depth_multiplier )
__lowercase = output_stride
__lowercase = hidden_act
__lowercase = classifier_dropout_prob
__lowercase = use_labels
__lowercase = is_training
__lowercase = num_labels
__lowercase = initializer_range
__lowercase = scope
def snake_case__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.num_labels )
__lowercase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowercase = self.get_config()
return config, pixel_values, labels, pixel_labels
def snake_case__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def snake_case__ ( self : List[str] , lowercase : Optional[Any] , lowercase : Dict , lowercase : Optional[int] , lowercase : List[str] ) -> Any:
"""simple docstring"""
__lowercase = MobileNetVaModel(config=lowercase )
model.to(lowercase )
model.eval()
__lowercase = model(lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def snake_case__ ( self : Tuple , lowercase : str , lowercase : str , lowercase : str , lowercase : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = MobileNetVaForImageClassification(lowercase )
model.to(lowercase )
model.eval()
__lowercase = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self : Any ) -> Tuple:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : Optional[int] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
lowercase__ : Tuple = (
{"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
lowercase__ : str = False
lowercase__ : int = False
lowercase__ : Any = False
lowercase__ : Tuple = False
def snake_case__ ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase = MobileNetVaModelTester(self )
__lowercase = MobileNetVaConfigTester(self , config_class=lowercase , has_text_modality=lowercase )
def snake_case__ ( self : List[str] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""" )
def snake_case__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""" )
def snake_case__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV1 does not output attentions""" )
def snake_case__ ( self : Tuple ) -> int:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase )
def snake_case__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def snake_case__ ( self : int ) -> Dict:
"""simple docstring"""
def check_hidden_states_output(lowercase : List[Any] , lowercase : Tuple , lowercase : Union[str, Any] ):
__lowercase = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase , lowercase ) )
__lowercase = outputs.hidden_states
__lowercase = 26
self.assertEqual(len(lowercase ) , lowercase )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(lowercase , lowercase , lowercase )
def snake_case__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
@slow
def snake_case__ ( self : Dict ) -> Dict:
"""simple docstring"""
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = MobileNetVaModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def UpperCAmelCase__ ( ) -> Union[str, Any]:
__lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case__ ( self : List[str] ) -> Dict:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""" ) if is_vision_available() else None
)
@slow
def snake_case__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""" ).to(lowercase )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=lowercase , return_tensors="""pt""" ).to(lowercase )
# forward pass
with torch.no_grad():
__lowercase = model(**lowercase )
# verify the logits
__lowercase = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , lowercase )
__lowercase = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1E-4 ) )
| 715 |
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    # Convert `value` between any two units of the table above (joule is the base unit).
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
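# Editorial usage sketch (added by the editor, not part of the original snippet):
# two quick sanity checks that follow directly from the conversion table above.
if __name__ == "__main__":
    assert energy_conversion("joule", "kilojoule", 1_000) == 1.0
    assert energy_conversion("kilowatthour", "joule", 1) == 3_600_000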
| 634 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
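# Editorial note (added by the editor, not part of the original module): with the lazy
# structure above, an import such as `from transformers.models.ctrl import CTRLTokenizer`
# is resolved by _LazyModule on first attribute access, so the torch / TensorFlow
# submodules are only imported when one of their classes is actually requested.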
| 716 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is kept in the asdict output even when it still has its default value.
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
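# Editorial usage sketch (added by the editor, not part of the original module): the
# template only records how dataset columns map onto the canonical fields, e.g.
#     Summarization(text_column="article", summary_column="highlights").column_mapping
#     -> {"article": "text", "highlights": "summary"}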
| 634 | 0 |
def solution(n: int = 1_000) -> int:
    # Sum the terms 2 * a * ((a - 1) // 2) for a from 3 up to and including n.
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
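# Editorial sanity check (added by the editor, not part of the original snippet): for
# n = 3 the sum has a single term, 2 * 3 * ((3 - 1) // 2) = 6, so solution(3) == 6.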
| 717 |
def find_min(arr) -> int:
    # Minimum difference between the sums of the two halves of a partition of `arr`,
    # computed with the classic subset-sum dynamic-programming table.
    n = len(arr)
    s = sum(arr)
    dp = [[False for _ in range(s + 1)] for _ in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i][j - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    diff = 0
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
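# Editorial usage sketch (added by the editor, not part of the original snippet):
# for [1, 6, 11, 5] the best split is {1, 5, 6} vs {11}, so the difference is 1.
if __name__ == "__main__":
    print(find_min([1, 6, 11, 5]))  # -> 1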
| 634 | 0 |
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    # Convert `value` between any two units of the table above (joule is the base unit).
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 718 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , lowercase : str , lowercase : Union[str, Any]=13 , lowercase : Tuple=32 , lowercase : Optional[Any]=2 , lowercase : Tuple=3 , lowercase : Tuple=16 , lowercase : Tuple=[1, 2, 1] , lowercase : Optional[Any]=[2, 2, 4] , lowercase : Dict=2 , lowercase : Optional[int]=2.0 , lowercase : List[Any]=True , lowercase : str=0.0 , lowercase : Any=0.0 , lowercase : Optional[int]=0.1 , lowercase : int="gelu" , lowercase : Tuple=False , lowercase : Optional[Any]=True , lowercase : int=0.02 , lowercase : Union[str, Any]=1E-5 , lowercase : Dict=True , lowercase : Any=None , lowercase : str=True , lowercase : str=10 , lowercase : Dict=8 , lowercase : int=["stage1", "stage2", "stage3"] , lowercase : Optional[int]=[1, 2, 3] , ) -> Any:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = embed_dim
__lowercase = depths
__lowercase = num_heads
__lowercase = window_size
__lowercase = mlp_ratio
__lowercase = qkv_bias
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = drop_path_rate
__lowercase = hidden_act
__lowercase = use_absolute_embeddings
__lowercase = patch_norm
__lowercase = layer_norm_eps
__lowercase = initializer_range
__lowercase = is_training
__lowercase = scope
__lowercase = use_labels
__lowercase = type_sequence_label_size
__lowercase = encoder_stride
__lowercase = out_features
__lowercase = out_indices
def snake_case__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self : List[str] ) -> int:
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def snake_case__ ( self : Any , lowercase : List[Any] , lowercase : Optional[int] , lowercase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = MaskFormerSwinModel(config=lowercase )
model.to(lowercase )
model.eval()
__lowercase = model(lowercase )
__lowercase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__lowercase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def snake_case__ ( self : Any , lowercase : Tuple , lowercase : Any , lowercase : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowercase = MaskFormerSwinBackbone(config=lowercase )
model.to(lowercase )
model.eval()
__lowercase = model(lowercase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(lowercase ):
__lowercase = ["""stem"""]
__lowercase = MaskFormerSwinBackbone(config=lowercase )
def snake_case__ ( self : int ) -> Any:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : Optional[int] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowercase__ : List[str] = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
lowercase__ : List[str] = False
lowercase__ : int = False
lowercase__ : int = False
lowercase__ : Tuple = False
lowercase__ : Optional[Any] = False
def snake_case__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = MaskFormerSwinModelTester(self )
__lowercase = ConfigTester(self , config_class=lowercase , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def snake_case__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowercase )
@unittest.skip("""Swin does not use inputs_embeds""" )
def snake_case__ ( self : int ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def snake_case__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) )
def snake_case__ ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def snake_case__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple , lowercase : Tuple , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase , lowercase ) )
__lowercase = outputs.hidden_states
__lowercase = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowercase ) , lowercase )
# Swin has a different seq_length
__lowercase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def snake_case__ ( self : int ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , lowercase )
def snake_case__ ( self : int ) -> str:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = 3
__lowercase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__lowercase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowercase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__lowercase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def snake_case__ ( self : Any ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def snake_case__ ( self : List[str] ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def snake_case__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(lowercase : Optional[int] ):
__lowercase = 0
return t
def check_equivalence(lowercase : Optional[int] , lowercase : str , lowercase : str , lowercase : Tuple={} ):
with torch.no_grad():
__lowercase = model(**lowercase , return_dict=lowercase , **lowercase )
__lowercase = model(**lowercase , return_dict=lowercase , **lowercase ).to_tuple()
def recursive_check(lowercase : int , lowercase : Optional[Any] ):
if isinstance(lowercase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowercase , lowercase ):
recursive_check(lowercase , lowercase )
elif isinstance(lowercase , lowercase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(lowercase , lowercase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(lowercase ) , set_nan_tensor_to_zero(lowercase ) , atol=1E-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
F" {torch.isnan(lowercase ).any()} and `inf`: {torch.isinf(lowercase )}. Dict has"
F" `nan`: {torch.isnan(lowercase ).any()} and `inf`: {torch.isinf(lowercase )}."
) , )
recursive_check(lowercase , lowercase )
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
model.to(lowercase )
model.eval()
__lowercase = self._prepare_for_class(lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase , {"""output_hidden_states""": True} )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase , {"""output_hidden_states""": True} )
@require_torch
class _lowerCAmelCase ( unittest.TestCase , _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : List[str] = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowercase__ : Any = MaskFormerSwinConfig
def snake_case__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = MaskFormerSwinModelTester(self )
def snake_case__ ( self : Any ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
__lowercase = backbone_class(lowercase )
backbone.to(lowercase )
backbone.eval()
__lowercase = backbone(**lowercase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , lowercase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
__lowercase = backbone(**lowercase , output_hidden_states=lowercase )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__lowercase , __lowercase , __lowercase = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
__lowercase = backbone(**lowercase , output_attentions=lowercase )
self.assertIsNotNone(outputs.attentions )
| 634 | 0 |
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def UpperCAmelCase__ ( lowercase__ = "laptop" ) -> DataFrame:
__lowercase = F"https://www.amazon.in/laptop/s?k={product}"
__lowercase = {
"""User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
"""Accept-Language""": """en-US, en;q=0.5""",
}
__lowercase = BeautifulSoup(requests.get(lowercase__ , headers=lowercase__ ).text )
# Initialize a Pandas dataframe with the column titles
__lowercase = DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
try:
__lowercase = item.ha.text
__lowercase = """https://www.amazon.in/""" + item.ha.a["""href"""]
__lowercase = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text
try:
__lowercase = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text
except AttributeError:
__lowercase = """Not available"""
try:
__lowercase = (
"""₹"""
+ item.find(
"""span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""₹""" )[1]
)
except AttributeError:
__lowercase = """"""
try:
__lowercase = float(
(
(
float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
- float(product_price.strip("""₹""" ).replace(""",""" , """""" ) )
)
/ float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
)
* 100 )
except ValueError:
__lowercase = float("""nan""" )
except AttributeError:
pass
__lowercase = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
__lowercase = """ """
__lowercase = """ """
data_frame.index += 1
return data_frame
if __name__ == "__main__":
UpperCamelCase__ = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 719 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
UpperCamelCase__ = "scheduler_config.json"
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : List[Any] = 1
lowercase__ : Tuple = 2
lowercase__ : Union[str, Any] = 3
lowercase__ : Union[str, Any] = 4
lowercase__ : str = 5
lowercase__ : Any = 6
lowercase__ : Any = 7
lowercase__ : List[str] = 8
lowercase__ : Union[str, Any] = 9
lowercase__ : int = 10
lowercase__ : List[str] = 11
lowercase__ : List[Any] = 12
lowercase__ : str = 13
lowercase__ : Optional[int] = 14
@dataclass
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : torch.FloatTensor
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : Optional[int] = SCHEDULER_CONFIG_NAME
lowercase__ : int = []
lowercase__ : Dict = True
@classmethod
def snake_case__ ( cls : str , lowercase : Dict[str, Any] = None , lowercase : Optional[str] = None , lowercase : Any=False , **lowercase : List[str] , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase , __lowercase , __lowercase = cls.load_config(
pretrained_model_name_or_path=lowercase , subfolder=lowercase , return_unused_kwargs=lowercase , return_commit_hash=lowercase , **lowercase , )
return cls.from_config(lowercase , return_unused_kwargs=lowercase , **lowercase )
def snake_case__ ( self : Dict , lowercase : Union[str, os.PathLike] , lowercase : bool = False , **lowercase : List[str] ) -> Optional[Any]:
"""simple docstring"""
self.save_config(save_directory=lowercase , push_to_hub=lowercase , **lowercase )
@property
def snake_case__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return self._get_compatibles()
@classmethod
def snake_case__ ( cls : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = list(set([cls.__name__] + cls._compatibles ) )
__lowercase = importlib.import_module(__name__.split(""".""" )[0] )
__lowercase = [
getattr(lowercase , lowercase ) for c in compatible_classes_str if hasattr(lowercase , lowercase )
]
return compatible_classes
| 634 | 0 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict , lowercase : int , lowercase : int=13 , lowercase : Any=7 , lowercase : Optional[Any]=True , lowercase : List[str]=True , lowercase : int=True , lowercase : Optional[Any]=True , lowercase : Union[str, Any]=99 , lowercase : Union[str, Any]=32 , lowercase : List[Any]=5 , lowercase : List[str]=4 , lowercase : Union[str, Any]=37 , lowercase : int="gelu" , lowercase : List[str]=0.1 , lowercase : Any=0.1 , lowercase : List[str]=512 , lowercase : List[str]=16 , lowercase : Dict=2 , lowercase : Optional[Any]=0.02 , lowercase : Dict=4 , ) -> Optional[Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_attention_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_choices
def snake_case__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_attention_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def snake_case__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class _lowerCAmelCase ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : Tuple = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def snake_case__ ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = FlaxAlbertModelTester(self )
@slow
def snake_case__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
__lowercase = model_class_name.from_pretrained("""albert-base-v2""" )
__lowercase = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowercase )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case__ ( self : str ) -> Dict:
"""simple docstring"""
__lowercase = FlaxAlbertModel.from_pretrained("""albert-base-v2""" )
__lowercase = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
__lowercase = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__lowercase = model(lowercase , attention_mask=lowercase )[0]
__lowercase = (1, 11, 768)
self.assertEqual(output.shape , lowercase )
__lowercase = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowercase , atol=1E-4 ) )
| 720 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Union[str, Any] = """sew"""
def __init__( self : List[Any] , lowercase : int=32 , lowercase : List[str]=768 , lowercase : Dict=12 , lowercase : str=12 , lowercase : str=3_072 , lowercase : Optional[int]=2 , lowercase : List[str]="gelu" , lowercase : List[str]=0.1 , lowercase : Tuple=0.1 , lowercase : Dict=0.1 , lowercase : Any=0.0 , lowercase : Dict=0.1 , lowercase : Optional[int]=0.1 , lowercase : List[str]=0.02 , lowercase : Dict=1E-5 , lowercase : Tuple="group" , lowercase : int="gelu" , lowercase : Any=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowercase : Optional[int]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase : Any=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase : List[str]=False , lowercase : Tuple=128 , lowercase : int=16 , lowercase : Union[str, Any]=True , lowercase : List[str]=0.05 , lowercase : Optional[int]=10 , lowercase : Any=2 , lowercase : Optional[Any]=0.0 , lowercase : Optional[Any]=10 , lowercase : int=0 , lowercase : Optional[int]="mean" , lowercase : List[Any]=False , lowercase : str=False , lowercase : int=256 , lowercase : str=0 , lowercase : List[Any]=1 , lowercase : List[Any]=2 , **lowercase : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase )
__lowercase = hidden_size
__lowercase = feat_extract_norm
__lowercase = feat_extract_activation
__lowercase = list(lowercase )
__lowercase = list(lowercase )
__lowercase = list(lowercase )
__lowercase = conv_bias
__lowercase = num_conv_pos_embeddings
__lowercase = num_conv_pos_embedding_groups
__lowercase = len(self.conv_dim )
__lowercase = num_hidden_layers
__lowercase = intermediate_size
__lowercase = squeeze_factor
__lowercase = hidden_act
__lowercase = num_attention_heads
__lowercase = hidden_dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = feat_proj_dropout
__lowercase = final_dropout
__lowercase = layerdrop
__lowercase = layer_norm_eps
__lowercase = initializer_range
__lowercase = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
F"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"
F"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowercase = apply_spec_augment
__lowercase = mask_time_prob
__lowercase = mask_time_length
__lowercase = mask_time_min_masks
__lowercase = mask_feature_prob
__lowercase = mask_feature_length
__lowercase = mask_feature_min_masks
# ctc loss
__lowercase = ctc_loss_reduction
__lowercase = ctc_zero_infinity
# sequence classification
__lowercase = use_weighted_layer_sum
__lowercase = classifier_proj_size
@property
def snake_case__ ( self : Dict ) -> str:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 634 | 0 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
UpperCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Dict , *lowercase : List[Any] , **lowercase : Union[str, Any] ) -> List[str]:
"""simple docstring"""
super().__init__(*lowercase , **lowercase )
requires_backends(self , """decord""" )
self.check_model_type(lowercase )
def snake_case__ ( self : Optional[int] , lowercase : Optional[int]=None , lowercase : List[str]=None , lowercase : str=None ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = {}
if frame_sampling_rate is not None:
__lowercase = frame_sampling_rate
if num_frames is not None:
__lowercase = num_frames
__lowercase = {}
if top_k is not None:
__lowercase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : Tuple , lowercase : Union[str, List[str]] , **lowercase : Union[str, Any] ) -> str:
"""simple docstring"""
return super().__call__(lowercase , **lowercase )
def snake_case__ ( self : Tuple , lowercase : Union[str, Any] , lowercase : List[str]=None , lowercase : Dict=1 ) -> Any:
"""simple docstring"""
if num_frames is None:
__lowercase = self.model.config.num_frames
if video.startswith("""http://""" ) or video.startswith("""https://""" ):
__lowercase = BytesIO(requests.get(lowercase ).content )
__lowercase = VideoReader(lowercase )
videoreader.seek(0 )
__lowercase = 0
__lowercase = num_frames * frame_sampling_rate - 1
__lowercase = np.linspace(lowercase , lowercase , num=lowercase , dtype=np.intaa )
__lowercase = videoreader.get_batch(lowercase ).asnumpy()
__lowercase = list(lowercase )
__lowercase = self.image_processor(lowercase , return_tensors=self.framework )
return model_inputs
def snake_case__ ( self : Tuple , lowercase : Dict ) -> List[str]:
"""simple docstring"""
__lowercase = self.model(**lowercase )
return model_outputs
def snake_case__ ( self : Optional[int] , lowercase : Optional[Any] , lowercase : Tuple=5 ) -> int:
"""simple docstring"""
if top_k > self.model.config.num_labels:
__lowercase = self.model.config.num_labels
if self.framework == "pt":
__lowercase = model_outputs.logits.softmax(-1 )[0]
__lowercase , __lowercase = probs.topk(lowercase )
else:
raise ValueError(F"Unsupported framework: {self.framework}" )
__lowercase = scores.tolist()
__lowercase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowercase , lowercase )]
| 721 |
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class _lowerCAmelCase ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : int = WavaVecaPhonemeCTCTokenizer
lowercase__ : Optional[int] = False
def snake_case__ ( self : str ) -> int:
"""simple docstring"""
super().setUp()
__lowercase = (
"""<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː """
"""ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː """
"""ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 """
"""oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ """
"""pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ """
"""yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ """
"""əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ """
"""ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ """
"""ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ """
"""uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ """
"""ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ """
"""ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ """
"""ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"""
).split(""" """ )
__lowercase = dict(zip(lowercase , range(len(lowercase ) ) ) )
__lowercase = {"""pad_token""": """<pad>""", """unk_token""": """<unk>""", """bos_token""": """<s>""", """eos_token""": """</s>"""}
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowercase ) + """\n""" )
def snake_case__ ( self : List[Any] , lowercase : Optional[Any] , lowercase : List[str]=False , lowercase : List[str]=20 , lowercase : str=5 ) -> Tuple[str, list]:
"""simple docstring"""
__lowercase = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase )) for i in range(len(lowercase ) )]
__lowercase = list(filter(lambda lowercase : [t[0]] == tokenizer.encode(t[1] , do_phonemize=lowercase ) , lowercase ) )
if max_length is not None and len(lowercase ) > max_length:
__lowercase = toks[:max_length]
if min_length is not None and len(lowercase ) < min_length and len(lowercase ) > 0:
while len(lowercase ) < min_length:
__lowercase = toks + toks
# toks_str = [t[1] for t in toks]
__lowercase = [t[0] for t in toks]
# Ensure consistency
__lowercase = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase )
if " " not in output_txt and len(lowercase ) > 1:
__lowercase = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase )
)
if with_prefix_space:
__lowercase = """ """ + output_txt
__lowercase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
return output_txt, output_ids
def snake_case__ ( self : Tuple , **lowercase : int ) -> Union[str, Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **lowercase )
def snake_case__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
# check adding a single token
tokenizer.add_tokens("""xxx""" )
__lowercase = tokenizer("""m xxx ɪ""" , do_phonemize=lowercase ).input_ids
self.assertEqual(lowercase , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(["""aaa""", """bbb""", """ccc"""] )
__lowercase = tokenizer("""m aaa ɪ ccc""" , do_phonemize=lowercase ).input_ids
self.assertEqual(lowercase , [13, 393, 17, 395] ) # aaa should come right after xxx, and ccc two ids after aaa
__lowercase = tokenizer("""maɪ c""" , do_phonemize=lowercase ).input_ids
self.assertEqual(lowercase , [3, 200] ) # mai should be <unk> (=3)
def snake_case__ ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
self.assertEqual(lowercase , """h ə l oʊ h aʊ ɑːɹ j uː""" )
def snake_case__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(lowercase ).input_ids , tokenizer(lowercase , do_phonemize=lowercase ).input_ids )
def snake_case__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
__lowercase = tokenizer.decode(tokenizer(lowercase ).input_ids )
self.assertEqual(lowercase , lowercase )
def snake_case__ ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
__lowercase = tokenizer.decode(sample_ids[0] )
__lowercase = tokenizer.batch_decode(lowercase )
self.assertEqual(lowercase , batch_tokens[0] )
self.assertEqual(lowercase , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
def snake_case__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
self.assertEqual(lowercase , """h ə l oʊ | h aʊ | ɑːɹ | j uː |""" )
def snake_case__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(lowercase ).input_ids , tokenizer(lowercase , do_phonemize=lowercase ).input_ids )
def snake_case__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
__lowercase = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
__lowercase = tokenizer.decode(sample_ids[0] )
__lowercase = tokenizer.batch_decode(lowercase )
self.assertEqual(lowercase , batch_tokens[0] )
self.assertEqual(lowercase , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
# decode with no word_del_token filter
__lowercase = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=lowercase )
__lowercase = tokenizer.batch_decode(lowercase , filter_word_delimiter_token=lowercase )
self.assertEqual(lowercase , batch_tokens[0] )
self.assertEqual(lowercase , ["""k s ɾ | ɾ l | ɭʲ""", """| j ð | s j ð s oːɹ"""] )
def snake_case__ ( self : int ) -> str:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
__lowercase = tokenizer.decode(tokenizer(lowercase ).input_ids , filter_word_delimiter_token=lowercase )
self.assertEqual(lowercase , lowercase )
def snake_case__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
__lowercase = tokenizer.decode(tokenizer(lowercase ).input_ids , filter_word_delimiter_token=lowercase )
self.assertEqual(""" """.join([p.strip() for p in phonemes.split(""" |""" )] ).strip() , lowercase )
def snake_case__ ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token=lowercase )
__lowercase = """Hello how are you"""
__lowercase = tokenizer(lowercase , phonemizer_lang="""en-us""" ).input_ids
__lowercase = tokenizer(lowercase , phonemizer_lang="""fr-fr""" ).input_ids
self.assertNotEqual(lowercase , lowercase )
__lowercase = tokenizer.decode(lowercase )
__lowercase = tokenizer.decode(lowercase )
self.assertEqual(lowercase , """h ə l oʊ h aʊ ɑːɹ j uː""" )
self.assertEqual(lowercase , """ɛ l o h aʊ a ʁ j u""" )
def snake_case__ ( self : int ) -> int:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = """Hello how Are you"""
__lowercase = """hello how are you"""
__lowercase = tokenizer(lowercase ).input_ids
__lowercase = tokenizer(lowercase ).input_ids
self.assertEqual(lowercase , lowercase )
def snake_case__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
tokenizer.add_tokens(["""!""", """?"""] )
tokenizer.add_special_tokens({"""cls_token""": """$$$"""} )
# fmt: off
__lowercase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
__lowercase = tokenizer.batch_decode(lowercase )
self.assertEqual(lowercase , ["""k s ɾ ɾ l ɭʲ!?!? $$$""", """j ð s j ð s oːɹ $$$"""] )
@staticmethod
def snake_case__ ( lowercase : List[str] , lowercase : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase = [d[key] for d in offsets]
return retrieved_list
def snake_case__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.get_tokenizer(word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ
__lowercase = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
__lowercase = tokenizer.decode(lowercase , output_char_offsets=lowercase , filter_word_delimiter_token=lowercase )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""char_offsets""" in outputs )
self.assertTrue(isinstance(lowercase , lowercase ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) , ["""k""", """s""", """ɾ""", """ɾ""", """|""", """ɾ""", """l""", """|""", """ɭʲ"""] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """start_offset""" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """end_offset""" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def snake_case__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase = self.get_tokenizer(word_delimiter_token="""|""" )
def check_list_tuples_equal(lowercase : List[str] , lowercase : Dict ):
self.assertTrue(isinstance(lowercase , lowercase ) )
self.assertTrue(isinstance(outputs_list[0] , lowercase ) )
# transform list to ModelOutput
__lowercase = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch["""text"""] , outputs_batch_a["""text"""] )
def recursive_check(lowercase : List[Any] , lowercase : Optional[int] ):
if isinstance(lowercase , lowercase ):
[recursive_check(lowercase , lowercase ) for la, la in zip(lowercase , lowercase )]
self.assertEqual(lowercase , lowercase )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["""char_offsets"""] , outputs_batch_a["""char_offsets"""] )
# fmt: off
__lowercase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we check now is that
# the output type is correct and that the output is identical to `decode`
# char
__lowercase = tokenizer.batch_decode(lowercase , output_char_offsets=lowercase )
__lowercase = [tokenizer.decode(lowercase , output_char_offsets=lowercase ) for ids in sample_ids]
check_list_tuples_equal(lowercase , lowercase )
@unittest.skip("""Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes""" )
def snake_case__ ( self : List[Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Wav2Vec2PhonemeTokenizer always puts spaces between phonemes""" )
def snake_case__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip("""encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency""" )
def snake_case__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Wav2Vec2PhonemeModel has no max model length => no testing""" )
def snake_case__ ( self : str ) -> int:
"""simple docstring"""
pass
def snake_case__ ( self : Any ) -> int:
"""simple docstring"""
__lowercase = self.get_tokenizers(do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
__lowercase = tokenizer.vocab_size
__lowercase = len(lowercase )
self.assertNotEqual(lowercase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__lowercase = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
__lowercase = tokenizer.add_tokens(lowercase )
__lowercase = tokenizer.vocab_size
__lowercase = len(lowercase )
self.assertNotEqual(lowercase , 0 )
self.assertEqual(lowercase , lowercase )
self.assertEqual(lowercase , len(lowercase ) )
self.assertEqual(lowercase , all_size + len(lowercase ) )
__lowercase = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=lowercase )
self.assertGreaterEqual(len(lowercase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
__lowercase = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
__lowercase = tokenizer.add_special_tokens(lowercase )
__lowercase = tokenizer.vocab_size
__lowercase = len(lowercase )
self.assertNotEqual(lowercase , 0 )
self.assertEqual(lowercase , lowercase )
self.assertEqual(lowercase , len(lowercase ) )
self.assertEqual(lowercase , all_size_a + len(lowercase ) )
__lowercase = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=lowercase )
self.assertGreaterEqual(len(lowercase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def snake_case__ ( self : str ) -> int:
"""simple docstring"""
pass
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def snake_case__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
def snake_case__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.get_tokenizers(fast=lowercase , do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
__lowercase = ["""ð""", """ɪ""", """s""", """ɪ""", """z""", """ɐ""", """t""", """ɛ""", """k""", """s""", """t"""]
__lowercase = tokenizer.convert_tokens_to_string(lowercase )
self.assertIsInstance(output["""text"""] , lowercase )
| 634 | 0 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
lowerCAmelCase_ = logging.getLogger(__name__)
@dataclass
class _snake_case :
"""simple docstring"""
a = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
a = field(
default=__snake_case , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
a = field(
default=__snake_case , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
a = field(
default=__snake_case , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
a = field(
default=__snake_case , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
a = field(
default=__snake_case , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} , )
@dataclass
class _snake_case :
"""simple docstring"""
a = field(
default=__snake_case , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
a = field(
default=__snake_case , metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."} )
a = field(
default=__snake_case , metadata={"help": "Train language if it is different from the evaluation language."} )
a = field(
default=__snake_case , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
a = field(
default=__snake_case , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
a = field(
default=__snake_case , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
a = field(
default=__snake_case , metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"} , )
a = field(
default=__snake_case , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
a = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
a = field(
default=__snake_case , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
a = field(
default=__snake_case , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def lowerCamelCase_()-> str:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_SCREAMING_SNAKE_CASE : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_xnli""" , __SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE : Tuple = training_args.get_process_log_level()
logger.setLevel(__SCREAMING_SNAKE_CASE )
datasets.utils.logging.set_verbosity(__SCREAMING_SNAKE_CASE )
transformers.utils.logging.set_verbosity(__SCREAMING_SNAKE_CASE )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
_SCREAMING_SNAKE_CASE : Union[str, Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_SCREAMING_SNAKE_CASE : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
_SCREAMING_SNAKE_CASE : str = load_dataset(
"""xnli""" , model_args.language , split="""train""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
_SCREAMING_SNAKE_CASE : Tuple = load_dataset(
"""xnli""" , model_args.train_language , split="""train""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
_SCREAMING_SNAKE_CASE : Any = train_dataset.features["""label"""].names
if training_args.do_eval:
_SCREAMING_SNAKE_CASE : Optional[int] = load_dataset(
"""xnli""" , model_args.language , split="""validation""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = eval_dataset.features["""label"""].names
if training_args.do_predict:
_SCREAMING_SNAKE_CASE : Dict = load_dataset(
"""xnli""" , model_args.language , split="""test""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
_SCREAMING_SNAKE_CASE : str = predict_dataset.features["""label"""].names
# Labels
_SCREAMING_SNAKE_CASE : List[Any] = len(__SCREAMING_SNAKE_CASE )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_SCREAMING_SNAKE_CASE : Tuple = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__SCREAMING_SNAKE_CASE , idalabel={str(__SCREAMING_SNAKE_CASE ): label for i, label in enumerate(__SCREAMING_SNAKE_CASE )} , labelaid={label: i for i, label in enumerate(__SCREAMING_SNAKE_CASE )} , finetuning_task="""xnli""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_SCREAMING_SNAKE_CASE : Tuple = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
_SCREAMING_SNAKE_CASE : Optional[Any] = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_SCREAMING_SNAKE_CASE : str = False
def preprocess_function(__SCREAMING_SNAKE_CASE ):
# Tokenize the texts
return tokenizer(
examples["""premise"""] , examples["""hypothesis"""] , padding=__SCREAMING_SNAKE_CASE , max_length=data_args.max_seq_length , truncation=__SCREAMING_SNAKE_CASE , )
if training_args.do_train:
if data_args.max_train_samples is not None:
_SCREAMING_SNAKE_CASE : Optional[int] = min(len(__SCREAMING_SNAKE_CASE ) , data_args.max_train_samples )
_SCREAMING_SNAKE_CASE : Union[str, Any] = train_dataset.select(range(__SCREAMING_SNAKE_CASE ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
_SCREAMING_SNAKE_CASE : Dict = train_dataset.map(
__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on train dataset""" , )
# Log a few random samples from the training set:
for index in random.sample(range(len(__SCREAMING_SNAKE_CASE ) ) , 3 ):
logger.info(F"""Sample {index} of the training set: {train_dataset[index]}.""" )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
_SCREAMING_SNAKE_CASE : Tuple = min(len(__SCREAMING_SNAKE_CASE ) , data_args.max_eval_samples )
_SCREAMING_SNAKE_CASE : Union[str, Any] = eval_dataset.select(range(__SCREAMING_SNAKE_CASE ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
_SCREAMING_SNAKE_CASE : Any = eval_dataset.map(
__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on validation dataset""" , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
_SCREAMING_SNAKE_CASE : str = min(len(__SCREAMING_SNAKE_CASE ) , data_args.max_predict_samples )
_SCREAMING_SNAKE_CASE : int = predict_dataset.select(range(__SCREAMING_SNAKE_CASE ) )
with training_args.main_process_first(desc="""prediction dataset map pre-processing""" ):
_SCREAMING_SNAKE_CASE : Dict = predict_dataset.map(
__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on prediction dataset""" , )
# Get the metric function
_SCREAMING_SNAKE_CASE : List[Any] = evaluate.load("""xnli""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# predictions and label_ids fields) and has to return a dictionary mapping strings to floats.
def compute_metrics(__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : str = p.predictions[0] if isinstance(p.predictions , __SCREAMING_SNAKE_CASE ) else p.predictions
_SCREAMING_SNAKE_CASE : Optional[Any] = np.argmax(__SCREAMING_SNAKE_CASE , axis=1 )
return metric.compute(predictions=__SCREAMING_SNAKE_CASE , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_SCREAMING_SNAKE_CASE : str = default_data_collator
elif training_args.fpaa:
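# pad sequence lengths to a multiple of 8 so fp16 tensor-core kernels get well-shaped inputs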
_SCREAMING_SNAKE_CASE : List[str] = DataCollatorWithPadding(__SCREAMING_SNAKE_CASE , pad_to_multiple_of=8 )
else:
_SCREAMING_SNAKE_CASE : int = None
# Initialize our Trainer
_SCREAMING_SNAKE_CASE : str = Trainer(
model=__SCREAMING_SNAKE_CASE , args=__SCREAMING_SNAKE_CASE , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , data_collator=__SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
_SCREAMING_SNAKE_CASE : Tuple = None
if training_args.resume_from_checkpoint is not None:
_SCREAMING_SNAKE_CASE : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_SCREAMING_SNAKE_CASE : Dict = last_checkpoint
_SCREAMING_SNAKE_CASE : int = trainer.train(resume_from_checkpoint=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Any = train_result.metrics
_SCREAMING_SNAKE_CASE : List[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__SCREAMING_SNAKE_CASE )
)
_SCREAMING_SNAKE_CASE : Tuple = min(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" , __SCREAMING_SNAKE_CASE )
trainer.save_metrics("""train""" , __SCREAMING_SNAKE_CASE )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_SCREAMING_SNAKE_CASE : int = trainer.evaluate(eval_dataset=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Union[str, Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Union[str, Any] = min(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) )
trainer.log_metrics("""eval""" , __SCREAMING_SNAKE_CASE )
trainer.save_metrics("""eval""" , __SCREAMING_SNAKE_CASE )
# Prediction
if training_args.do_predict:
logger.info("""*** Predict ***""" )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = trainer.predict(__SCREAMING_SNAKE_CASE , metric_key_prefix="""predict""" )
_SCREAMING_SNAKE_CASE : Tuple = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(__SCREAMING_SNAKE_CASE )
)
_SCREAMING_SNAKE_CASE : Union[str, Any] = min(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) )
trainer.log_metrics("""predict""" , __SCREAMING_SNAKE_CASE )
trainer.save_metrics("""predict""" , __SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : int = np.argmax(__SCREAMING_SNAKE_CASE , axis=1 )
_SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(training_args.output_dir , """predictions.txt""" )
if trainer.is_world_process_zero():
with open(__SCREAMING_SNAKE_CASE , """w""" ) as writer:
writer.write("""index\tprediction\n""" )
for index, item in enumerate(__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : str = label_list[item]
writer.write(F"""{index}\t{item}\n""" )
if __name__ == "__main__":
main()
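# Hypothetical example invocation (model name and output path are illustrative only, not taken from this file):
#   python run_xnli.py --model_name_or_path bert-base-multilingual-cased --language de --train_language en \
#       --do_train --do_eval --per_device_train_batch_size 32 --output_dir /tmp/debug_xnli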
| 635 | """simple docstring"""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def lowerCamelCase_(__SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=1_026 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="data/tokenized_stories_train_wikitext103.jbl" , __SCREAMING_SNAKE_CASE="igf_context_pairs.jbl" , )-> Union[str, Any]:
set_seed(3 )
# generate train_data and objective_set
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = generate_datasets(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , number=__SCREAMING_SNAKE_CASE , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
_SCREAMING_SNAKE_CASE : Dict = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# load pretrained model
_SCREAMING_SNAKE_CASE : Any = load_gpta("""gpt2""" ).to(__SCREAMING_SNAKE_CASE )
print("""computing perplexity on objective set""" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).item()
print("""perplexity on objective set:""" , __SCREAMING_SNAKE_CASE )
# collect igf pairs and save to file demo.jbl
collect_objective_set(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=15 , __SCREAMING_SNAKE_CASE=128 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE="igf_model.pt" , )-> Optional[int]:
set_seed(42 )
# Load pre-trained model
_SCREAMING_SNAKE_CASE : Any = GPTaLMHeadModel.from_pretrained("""gpt2""" )
# Initialize secondary learner to use embedding weights of model
_SCREAMING_SNAKE_CASE : Union[str, Any] = SecondaryLearner(__SCREAMING_SNAKE_CASE )
# Train secondary learner
_SCREAMING_SNAKE_CASE : Any = train_secondary_learner(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , max_epochs=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , eval_freq=100 , igf_model_path=__SCREAMING_SNAKE_CASE , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=1_000 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=recopy_gpta , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE="gpt2_finetuned.pt" , )-> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Tuple = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = RandomSampler(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Tuple = DataLoader(__SCREAMING_SNAKE_CASE , sampler=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Tuple = max_steps // (len(__SCREAMING_SNAKE_CASE )) + 1
_SCREAMING_SNAKE_CASE : List[Any] = 0
_SCREAMING_SNAKE_CASE : Any = torch.zeros((1, context_len) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE )
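# Restore the original pretrained weights and get a fresh optimizer and LR scheduler for this run (see --recopy_model).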
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = recopy_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
model.train()
if secondary_learner is not None:
secondary_learner.to(__SCREAMING_SNAKE_CASE )
secondary_learner.eval()
_SCREAMING_SNAKE_CASE : Dict = []
_SCREAMING_SNAKE_CASE : Optional[int] = 0
_SCREAMING_SNAKE_CASE : Optional[Any] = []
_SCREAMING_SNAKE_CASE : int = []
# Compute the performance of the transformer model at the beginning
_SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
test_perps.append(__SCREAMING_SNAKE_CASE )
print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE )
for epoch in range(int(__SCREAMING_SNAKE_CASE ) ):
for step, example in enumerate(__SCREAMING_SNAKE_CASE ):
torch.cuda.empty_cache()
_SCREAMING_SNAKE_CASE : Any = random.randint(0 , example.size(2 ) - context_len - 1 )
_SCREAMING_SNAKE_CASE : int = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : List[str] = True
if secondary_learner is not None:
_SCREAMING_SNAKE_CASE : List[Any] = secondary_learner.forward(
torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(__SCREAMING_SNAKE_CASE ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
_SCREAMING_SNAKE_CASE : Dict = -1
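# (simplified here: the threshold jumps straight from its initial value to -1 once 10 batches have been processed,
# rather than decaying smoothly)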
if predicted_q < threshold:
_SCREAMING_SNAKE_CASE : List[str] = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
_SCREAMING_SNAKE_CASE : Union[str, Any] = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
_SCREAMING_SNAKE_CASE : Any = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
_SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
test_perps.append(__SCREAMING_SNAKE_CASE )
print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def lowerCamelCase_()-> Tuple:
_SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" )
# Required parameters
parser.add_argument(
"""--data_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The input data dir. Should contain data files for WikiText.""" , )
parser.add_argument(
"""--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help=(
"""A jbl file containing tokenized data which can be split as objective dataset, """
"""train_dataset and test_dataset."""
) , )
parser.add_argument(
"""--igf_data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , )
parser.add_argument(
"""--output_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The output directory where the final fine-tuned model is stored.""" , )
parser.add_argument(
"""--tokenizer_name""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
parser.add_argument("""--seed""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A seed for reproducible training.""" )
parser.add_argument(
"""--context_len""" , default=32 , type=__SCREAMING_SNAKE_CASE , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--size_objective_set""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""number of articles that are long enough to be used as our objective set""" , )
parser.add_argument(
"""--eval_freq""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""secondary model evaluation is triggered at eval_freq""" )
parser.add_argument("""--max_steps""" , default=1_000 , type=__SCREAMING_SNAKE_CASE , help="""To calculate training epochs""" )
parser.add_argument(
"""--secondary_learner_batch_size""" , default=128 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data for secondary learner""" , )
parser.add_argument(
"""--batch_size""" , default=16 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data of language model(gpt2) """ )
parser.add_argument(
"""--eval_interval""" , default=10 , type=__SCREAMING_SNAKE_CASE , help=(
"""decay the selectivity of our secondary learner filter from"""
"""1 standard deviation above average to 1 below average after 10 batches"""
) , )
parser.add_argument(
"""--number""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""The number of examples split to be used as objective_set/test_data""" )
parser.add_argument(
"""--min_len""" , default=1_026 , type=__SCREAMING_SNAKE_CASE , help="""The minimum length of the article to be used as objective set""" )
parser.add_argument(
"""--secondary_learner_max_epochs""" , default=15 , type=__SCREAMING_SNAKE_CASE , help="""number of epochs to train secondary learner""" )
parser.add_argument("""--trim""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""truncate the example if it exceeds context length""" )
parser.add_argument(
"""--threshold""" , default=1.0 , type=__SCREAMING_SNAKE_CASE , help=(
"""The threshold value used by secondary learner to filter the train_data and allow only"""
""" informative data as input to the model"""
) , )
parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=__SCREAMING_SNAKE_CASE , help="""finetuned_model_name""" )
parser.add_argument(
"""--recopy_model""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , )
# function calls
# Collecting *n* pairs of context and information gain (X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , )
# Load train data for secondary learner
_SCREAMING_SNAKE_CASE : Optional[int] = joblib.load("""data/IGF_values.jbl""" )
# Train secondary learner
_SCREAMING_SNAKE_CASE : int = training_secondary_learner(
__SCREAMING_SNAKE_CASE , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="""igf_model.pt""" , )
# load pretrained gpt2 model
_SCREAMING_SNAKE_CASE : List[Any] = GPTaLMHeadModel.from_pretrained("""gpt2""" )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = generate_datasets(
context_len=32 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=__SCREAMING_SNAKE_CASE , secondary_learner=__SCREAMING_SNAKE_CASE , eval_interval=10 , finetuned_model_name="""gpt2_finetuned.pt""" , )
if __name__ == "__main__":
main()
| 635 | 1 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _snake_case ( __snake_case , unittest.TestCase ):
"""simple docstring"""
a = LongformerTokenizer
a = True
a = LongformerTokenizerFast
a = True
def _lowerCAmelCase ( self : Optional[Any]):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_SCREAMING_SNAKE_CASE : int = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_SCREAMING_SNAKE_CASE : List[str] = dict(zip(_A , range(len(_A))))
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_SCREAMING_SNAKE_CASE : List[Any] = {"""unk_token""": """<unk>"""}
_SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
_SCREAMING_SNAKE_CASE : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(_A) + """\n""")
with open(self.merges_file , """w""" , encoding="""utf-8""") as fp:
fp.write("""\n""".join(_A))
def _lowerCAmelCase ( self : Optional[Any] , **_A : List[Any]):
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A)
def _lowerCAmelCase ( self : Any , **_A : Any):
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_A)
def _lowerCAmelCase ( self : Any , _A : str):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = """lower newer"""
_SCREAMING_SNAKE_CASE : Dict = """lower newer"""
return input_text, output_text
def _lowerCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map)
_SCREAMING_SNAKE_CASE : str = """lower newer"""
_SCREAMING_SNAKE_CASE : Optional[int] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
_SCREAMING_SNAKE_CASE : Dict = tokenizer.tokenize(_A) # , add_prefix_space=True)
self.assertListEqual(_A , _A)
_SCREAMING_SNAKE_CASE : int = tokens + [tokenizer.unk_token]
_SCREAMING_SNAKE_CASE : Optional[Any] = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A) , _A)
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=_A) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2])
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=_A) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def _lowerCAmelCase ( self : int):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""")
_SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode("""sequence builders""" , add_special_tokens=_A)
_SCREAMING_SNAKE_CASE : str = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_A)
_SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(
"""sequence builders""" , add_special_tokens=_A , add_prefix_space=_A)
_SCREAMING_SNAKE_CASE : Dict = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=_A , add_prefix_space=_A)
_SCREAMING_SNAKE_CASE : str = tokenizer.build_inputs_with_special_tokens(_A)
_SCREAMING_SNAKE_CASE : List[Any] = tokenizer.build_inputs_with_special_tokens(_A , _A)
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _lowerCAmelCase ( self : Optional[int]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer()
_SCREAMING_SNAKE_CASE : Any = """Encode this sequence."""
_SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.byte_encoder[""" """.encode("""utf-8""")[0]]
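# byte-level BPE represents the leading space byte with a printable marker (Ġ); used below to check add_prefix_space handling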
# Testing encoder arguments
_SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(_A , add_special_tokens=_A , add_prefix_space=_A)
_SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[0])[0]
self.assertNotEqual(_A , _A)
_SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(_A , add_special_tokens=_A , add_prefix_space=_A)
_SCREAMING_SNAKE_CASE : Any = tokenizer.convert_ids_to_tokens(encoded[0])[0]
self.assertEqual(_A , _A)
tokenizer.add_special_tokens({"""bos_token""": """<s>"""})
_SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A , add_special_tokens=_A)
_SCREAMING_SNAKE_CASE : int = tokenizer.convert_ids_to_tokens(encoded[1])[0]
self.assertNotEqual(_A , _A)
# Testing spaces after special tokens
_SCREAMING_SNAKE_CASE : Dict = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(_A , lstrip=_A , rstrip=_A)}) # mask token has a left space
_SCREAMING_SNAKE_CASE : str = tokenizer.convert_tokens_to_ids(_A)
_SCREAMING_SNAKE_CASE : Optional[int] = """Encode <mask> sequence"""
_SCREAMING_SNAKE_CASE : Optional[int] = """Encode <mask>sequence"""
_SCREAMING_SNAKE_CASE : int = tokenizer.encode(_A)
_SCREAMING_SNAKE_CASE : int = encoded.index(_A)
_SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
self.assertEqual(_A , _A)
_SCREAMING_SNAKE_CASE : str = tokenizer.encode(_A)
_SCREAMING_SNAKE_CASE : int = encoded.index(_A)
_SCREAMING_SNAKE_CASE : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
self.assertNotEqual(_A , _A)
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
pass
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
_SCREAMING_SNAKE_CASE : int = self.rust_tokenizer_class.from_pretrained(_A , **_A)
_SCREAMING_SNAKE_CASE : int = self.tokenizer_class.from_pretrained(_A , **_A)
_SCREAMING_SNAKE_CASE : str = """A, <mask> AllenNLP sentence."""
_SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.encode_plus(_A , add_special_tokens=_A , return_token_type_ids=_A)
_SCREAMING_SNAKE_CASE : Tuple = tokenizer_p.encode_plus(_A , add_special_tokens=_A , return_token_type_ids=_A)
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""]) , sum(tokens_p["""token_type_ids"""]))
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""]) / len(tokens_r["""attention_mask"""]) , sum(tokens_p["""attention_mask"""]) / len(tokens_p["""attention_mask"""]) , )
_SCREAMING_SNAKE_CASE : Any = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""])
_SCREAMING_SNAKE_CASE : str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""])
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2])
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2])
self.assertSequenceEqual(
_A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
self.assertSequenceEqual(
_A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2):
_SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=_A , add_prefix_space=_A , trim_offsets=_A)
_SCREAMING_SNAKE_CASE : int = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
_SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , _A)
self.assertEqual(post_processor_state["""add_prefix_space"""] , _A)
self.assertEqual(post_processor_state["""trim_offsets"""] , _A)
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
_SCREAMING_SNAKE_CASE : int = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
_SCREAMING_SNAKE_CASE : int = f"""{text_of_1_token} {text_of_1_token}"""
_SCREAMING_SNAKE_CASE : int = self.rust_tokenizer_class.from_pretrained(
_A , use_fast=_A , add_prefix_space=_A , trim_offsets=_A)
_SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(_A , return_offsets_mapping=_A , add_special_tokens=_A)
self.assertEqual(encoding.offset_mapping[0] , (0, len(_A)))
self.assertEqual(
encoding.offset_mapping[1] , (len(_A) + 1, len(_A) + 1 + len(_A)) , )
_SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(
_A , use_fast=_A , add_prefix_space=_A , trim_offsets=_A)
_SCREAMING_SNAKE_CASE : Dict = tokenizer_r(_A , return_offsets_mapping=_A , add_special_tokens=_A)
self.assertEqual(encoding.offset_mapping[0] , (0, len(_A)))
self.assertEqual(
encoding.offset_mapping[1] , (len(_A) + 1, len(_A) + 1 + len(_A)) , )
_SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
_A , use_fast=_A , add_prefix_space=_A , trim_offsets=_A)
_SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(_A , return_offsets_mapping=_A , add_special_tokens=_A)
self.assertEqual(encoding.offset_mapping[0] , (0, len(_A)))
self.assertEqual(
encoding.offset_mapping[1] , (len(_A), len(_A) + 1 + len(_A)) , )
_SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(
_A , use_fast=_A , add_prefix_space=_A , trim_offsets=_A)
_SCREAMING_SNAKE_CASE : str = tokenizer_r(_A , return_offsets_mapping=_A , add_special_tokens=_A)
self.assertEqual(encoding.offset_mapping[0] , (0, len(_A)))
self.assertEqual(
encoding.offset_mapping[1] , (len(_A), len(_A) + 1 + len(_A)) , )
_SCREAMING_SNAKE_CASE : Any = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
_SCREAMING_SNAKE_CASE : Dict = self.rust_tokenizer_class.from_pretrained(
_A , use_fast=_A , add_prefix_space=_A , trim_offsets=_A)
_SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r(_A , return_offsets_mapping=_A , add_special_tokens=_A)
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_A)))
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_A) + 1, 1 + len(_A) + 1 + len(_A)) , )
_SCREAMING_SNAKE_CASE : List[str] = self.rust_tokenizer_class.from_pretrained(
_A , use_fast=_A , add_prefix_space=_A , trim_offsets=_A)
_SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r(_A , return_offsets_mapping=_A , add_special_tokens=_A)
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_A)))
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_A), 1 + len(_A) + 1 + len(_A)) , )
_SCREAMING_SNAKE_CASE : List[Any] = self.rust_tokenizer_class.from_pretrained(
_A , use_fast=_A , add_prefix_space=_A , trim_offsets=_A)
_SCREAMING_SNAKE_CASE : str = tokenizer_r(_A , return_offsets_mapping=_A , add_special_tokens=_A)
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_A)))
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_A), 1 + len(_A) + 1 + len(_A)) , )
| 635 | """simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _snake_case ( __snake_case ):
"""simple docstring"""
a = ["image_processor", "tokenizer"]
a = "ChineseCLIPImageProcessor"
a = ("BertTokenizer", "BertTokenizerFast")
def __init__( self : Dict , _A : Tuple=None , _A : List[Any]=None , **_A : int):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , _A , )
_SCREAMING_SNAKE_CASE : str = kwargs.pop("""feature_extractor""")
_SCREAMING_SNAKE_CASE : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""")
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""")
super().__init__(_A , _A)
_SCREAMING_SNAKE_CASE : Dict = self.image_processor
def __call__( self : Optional[int] , _A : Optional[Any]=None , _A : Any=None , _A : Tuple=None , **_A : int):
"""simple docstring"""
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""")
if text is not None:
_SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(_A , return_tensors=_A , **_A)
if images is not None:
_SCREAMING_SNAKE_CASE : List[Any] = self.image_processor(_A , return_tensors=_A , **_A)
if text is not None and images is not None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_A) , tensor_type=_A)
def _lowerCAmelCase ( self : str , *_A : Any , **_A : Any):
"""simple docstring"""
return self.tokenizer.batch_decode(*_A , **_A)
def _lowerCAmelCase ( self : Union[str, Any] , *_A : List[Any] , **_A : Any):
"""simple docstring"""
return self.tokenizer.decode(*_A , **_A)
@property
def _lowerCAmelCase ( self : str):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.model_input_names
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
@property
def _lowerCAmelCase ( self : List[str]):
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _A , )
return self.image_processor_class
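# Added usage sketch (assumptions: the "OFA-Sys/chinese-clip-vit-base-patch16" checkpoint
# and a local "cat.png" file; both are placeholders, not part of the class above).
from PIL import Image
from transformers import ChineseCLIPProcessor

clip_processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
clip_inputs = clip_processor(text=["一只猫"], images=Image.open("cat.png"), padding=True, return_tensors="pt")
# The processor merges tokenizer output and image-processor output into one BatchEncoding,
# so the keys typically include input_ids, attention_mask and pixel_values.
print(clip_inputs.keys())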
| 635 | 1 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = inspect.getfile(accelerate.test_utils)
_SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.sep.join(
mod_file.split(os.path.sep)[:-1] + ["""scripts""", """external_deps""", """test_metrics.py"""])
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
_SCREAMING_SNAKE_CASE : Optional[int] = test_metrics
@require_cpu
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
debug_launcher(self.test_metrics.main , num_processes=1)
@require_cpu
def _lowerCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
debug_launcher(self.test_metrics.main)
@require_single_gpu
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
self.test_metrics.main()
@require_multi_gpu
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
print(f"""Found {torch.cuda.device_count()} devices.""")
_SCREAMING_SNAKE_CASE : Optional[int] = ["""torchrun""", f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(_A , env=os.environ.copy())
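# Added sketch (illustrative only): `debug_launcher` can drive any zero-argument callable
# across several CPU processes, which is what the CPU tests above rely on.
from accelerate import debug_launcher
from accelerate.state import PartialState


def _demo_worker():
    state = PartialState()
    print(f"process {state.process_index} of {state.num_processes}")


if __name__ == "__main__":
    debug_launcher(_demo_worker, num_processes=2)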
| 635 | """simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = ['''model.decoder.embed_positions.weights''']
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[int]:
if "emb" in name:
_SCREAMING_SNAKE_CASE : List[Any] = name.replace("""emb""" , """model.decoder.embed_tokens""" )
if "transformer" in name:
_SCREAMING_SNAKE_CASE : List[str] = name.replace("""transformer""" , """model.decoder""" )
if "cross_attention" in name:
_SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""cross_attention""" , """encoder_attn""" )
if "linear1" in name:
_SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""linear1""" , """fc1""" )
if "linear2" in name:
_SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""linear2""" , """fc2""" )
if "norm1" in name:
_SCREAMING_SNAKE_CASE : int = name.replace("""norm1""" , """self_attn_layer_norm""" )
if "norm_cross" in name:
_SCREAMING_SNAKE_CASE : Dict = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
if "norm2" in name:
_SCREAMING_SNAKE_CASE : Dict = name.replace("""norm2""" , """final_layer_norm""" )
if "out_norm" in name:
_SCREAMING_SNAKE_CASE : Tuple = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
if "linears" in name:
_SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""linears""" , """lm_heads""" )
if "condition_provider.conditioners.description.output_proj" in name:
_SCREAMING_SNAKE_CASE : str = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
return name
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple[Dict, Dict]:
_SCREAMING_SNAKE_CASE : str = list(state_dict.keys() )
_SCREAMING_SNAKE_CASE : Tuple = {}
for key in keys:
_SCREAMING_SNAKE_CASE : Dict = state_dict.pop(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : int = rename_keys(__SCREAMING_SNAKE_CASE )
if "in_proj_weight" in key:
# split fused qkv proj
_SCREAMING_SNAKE_CASE : str = val[:hidden_size, :]
_SCREAMING_SNAKE_CASE : Any = val[hidden_size : 2 * hidden_size, :]
_SCREAMING_SNAKE_CASE : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
_SCREAMING_SNAKE_CASE : int = val
else:
_SCREAMING_SNAKE_CASE : Dict = val
return state_dict, enc_dec_proj_state_dict
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
_SCREAMING_SNAKE_CASE : Optional[Any] = 1_024
_SCREAMING_SNAKE_CASE : str = 24
_SCREAMING_SNAKE_CASE : Any = 16
elif checkpoint == "medium":
_SCREAMING_SNAKE_CASE : Dict = 1_536
_SCREAMING_SNAKE_CASE : Union[str, Any] = 48
_SCREAMING_SNAKE_CASE : Optional[Any] = 24
elif checkpoint == "large":
_SCREAMING_SNAKE_CASE : List[Any] = 2_048
_SCREAMING_SNAKE_CASE : Optional[int] = 48
_SCREAMING_SNAKE_CASE : str = 32
else:
raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
_SCREAMING_SNAKE_CASE : Optional[Any] = MusicgenDecoderConfig(
hidden_size=__SCREAMING_SNAKE_CASE , ffn_dim=hidden_size * 4 , num_hidden_layers=__SCREAMING_SNAKE_CASE , num_attention_heads=__SCREAMING_SNAKE_CASE , )
return config
@torch.no_grad()
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="cpu" )-> str:
_SCREAMING_SNAKE_CASE : str = MusicGen.get_pretrained(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : List[str] = decoder_config_from_checkpoint(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : List[Any] = fairseq_model.lm.state_dict()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = rename_state_dict(
__SCREAMING_SNAKE_CASE , hidden_size=decoder_config.hidden_size )
_SCREAMING_SNAKE_CASE : Tuple = TaEncoderModel.from_pretrained("""t5-base""" )
_SCREAMING_SNAKE_CASE : List[Any] = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
_SCREAMING_SNAKE_CASE : str = MusicgenForCausalLM(__SCREAMING_SNAKE_CASE ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = decoder.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" )
if len(__SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
_SCREAMING_SNAKE_CASE : Dict = MusicgenForConditionalGeneration(text_encoder=__SCREAMING_SNAKE_CASE , audio_encoder=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(__SCREAMING_SNAKE_CASE )
# check we can do a forward pass
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
_SCREAMING_SNAKE_CASE : Dict = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
_SCREAMING_SNAKE_CASE : Optional[int] = model(input_ids=__SCREAMING_SNAKE_CASE , decoder_input_ids=__SCREAMING_SNAKE_CASE ).logits
if logits.shape != (8, 1, 2_048):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
_SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""t5-base""" )
_SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
_SCREAMING_SNAKE_CASE : Optional[int] = MusicgenProcessor(feature_extractor=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE )
# set the appropriate bos/pad token ids
_SCREAMING_SNAKE_CASE : Optional[Any] = 2_048
_SCREAMING_SNAKE_CASE : List[Any] = 2_048
# set other default generation config params
_SCREAMING_SNAKE_CASE : Any = int(30 * audio_encoder.config.frame_rate )
_SCREAMING_SNAKE_CASE : Tuple = True
_SCREAMING_SNAKE_CASE : int = 3.0
if pytorch_dump_folder is not None:
Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
processor.save_pretrained(__SCREAMING_SNAKE_CASE )
if repo_id:
logger.info(F"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(__SCREAMING_SNAKE_CASE )
processor.push_to_hub(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
lowerCAmelCase_ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
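# Added sketch (the folder path and prompt are placeholders): loading a converted checkpoint
# back with the standard transformers API and generating a short clip.
import torch
from transformers import AutoProcessor, MusicgenForConditionalGeneration

converted_dir = "./musicgen-small-converted"  # assumed to be the --pytorch_dump_folder used above
mg_model = MusicgenForConditionalGeneration.from_pretrained(converted_dir)
mg_processor = AutoProcessor.from_pretrained(converted_dir)

mg_inputs = mg_processor(text=["lo-fi beat with soft piano"], padding=True, return_tensors="pt")
with torch.no_grad():
    audio_values = mg_model.generate(**mg_inputs, do_sample=True, guidance_scale=3.0, max_new_tokens=256)
# shape: (batch, num_channels, num_samples) at the audio encoder's sampling rate
print(audio_values.shape)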
| 635 | 1 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _snake_case :
"""simple docstring"""
a = 42
# setable values
a = 42
a = 42
a = None
@classmethod
def _lowerCAmelCase ( cls : str , _A : CommonSchedulerState , _A : jnp.ndarray , _A : jnp.ndarray):
"""simple docstring"""
return cls(common=_A , init_noise_sigma=_A , timesteps=_A)
@dataclass
class _snake_case ( __snake_case ):
"""simple docstring"""
a = 42
class _snake_case ( __snake_case , __snake_case ):
"""simple docstring"""
a = [e.name for e in FlaxKarrasDiffusionSchedulers]
a = 42
@property
def _lowerCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
return True
@register_to_config
def __init__( self : Optional[int] , _A : int = 1_0_0_0 , _A : float = 0.0_001 , _A : float = 0.02 , _A : str = "linear" , _A : Optional[jnp.ndarray] = None , _A : str = "fixed_small" , _A : bool = True , _A : str = "epsilon" , _A : jnp.dtype = jnp.floataa , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = dtype
def _lowerCAmelCase ( self : Dict , _A : Optional[CommonSchedulerState] = None):
"""simple docstring"""
if common is None:
_SCREAMING_SNAKE_CASE : Any = CommonSchedulerState.create(self)
# standard deviation of the initial noise distribution
_SCREAMING_SNAKE_CASE : Any = jnp.array(1.0 , dtype=self.dtype)
_SCREAMING_SNAKE_CASE : int = jnp.arange(0 , self.config.num_train_timesteps).round()[::-1]
return DDPMSchedulerState.create(
common=_A , init_noise_sigma=_A , timesteps=_A , )
def _lowerCAmelCase ( self : List[str] , _A : DDPMSchedulerState , _A : jnp.ndarray , _A : Optional[int] = None):
"""simple docstring"""
return sample
def _lowerCAmelCase ( self : Any , _A : DDPMSchedulerState , _A : int , _A : Tuple = ()):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
_SCREAMING_SNAKE_CASE : List[str] = (jnp.arange(0 , _A) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=_A , timesteps=_A , )
def _lowerCAmelCase ( self : Dict , _A : DDPMSchedulerState , _A : Union[str, Any] , _A : Tuple=None , _A : List[Any]=None):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = state.common.alphas_cumprod[t]
_SCREAMING_SNAKE_CASE : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype))
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
_SCREAMING_SNAKE_CASE : Union[str, Any] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
_SCREAMING_SNAKE_CASE : Any = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
_SCREAMING_SNAKE_CASE : int = jnp.clip(_A , a_min=1e-20)
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
_SCREAMING_SNAKE_CASE : str = jnp.log(jnp.clip(_A , a_min=1e-20))
elif variance_type == "fixed_large":
_SCREAMING_SNAKE_CASE : Union[str, Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
_SCREAMING_SNAKE_CASE : Tuple = jnp.log(state.common.betas[t])
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
_SCREAMING_SNAKE_CASE : int = variance
_SCREAMING_SNAKE_CASE : int = state.common.betas[t]
_SCREAMING_SNAKE_CASE : int = (predicted_variance + 1) / 2
_SCREAMING_SNAKE_CASE : Dict = frac * max_log + (1 - frac) * min_log
return variance
def _lowerCAmelCase ( self : Dict , _A : DDPMSchedulerState , _A : jnp.ndarray , _A : int , _A : jnp.ndarray , _A : Optional[jax.random.KeyArray] = None , _A : bool = True , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = timestep
if key is None:
_SCREAMING_SNAKE_CASE : Tuple = jax.random.PRNGKey(0)
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = jnp.split(_A , sample.shape[1] , axis=1)
else:
_SCREAMING_SNAKE_CASE : Dict = None
# 1. compute alphas, betas
_SCREAMING_SNAKE_CASE : Optional[Any] = state.common.alphas_cumprod[t]
_SCREAMING_SNAKE_CASE : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype))
_SCREAMING_SNAKE_CASE : Optional[int] = 1 - alpha_prod_t
_SCREAMING_SNAKE_CASE : Union[str, Any] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
_SCREAMING_SNAKE_CASE : Union[str, Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
_SCREAMING_SNAKE_CASE : str = model_output
elif self.config.prediction_type == "v_prediction":
_SCREAMING_SNAKE_CASE : Dict = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
""" for the FlaxDDPMScheduler.""")
# 3. Clip "predicted x_0"
if self.config.clip_sample:
_SCREAMING_SNAKE_CASE : int = jnp.clip(_A , -1 , 1)
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_SCREAMING_SNAKE_CASE : int = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
_SCREAMING_SNAKE_CASE : List[str] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_SCREAMING_SNAKE_CASE : List[str] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
_SCREAMING_SNAKE_CASE : Optional[Any] = jax.random.split(_A , num=1)
_SCREAMING_SNAKE_CASE : str = jax.random.normal(_A , shape=model_output.shape , dtype=self.dtype)
return (self._get_variance(_A , _A , predicted_variance=_A) ** 0.5) * noise
_SCREAMING_SNAKE_CASE : Optional[Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype))
_SCREAMING_SNAKE_CASE : List[str] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=_A , state=_A)
def _lowerCAmelCase ( self : Optional[Any] , _A : DDPMSchedulerState , _A : jnp.ndarray , _A : jnp.ndarray , _A : jnp.ndarray , ):
"""simple docstring"""
return add_noise_common(state.common , _A , _A , _A)
def _lowerCAmelCase ( self : Any , _A : DDPMSchedulerState , _A : jnp.ndarray , _A : jnp.ndarray , _A : jnp.ndarray , ):
"""simple docstring"""
return get_velocity_common(state.common , _A , _A , _A)
def __len__( self : Union[str, Any]):
"""simple docstring"""
return self.config.num_train_timesteps
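# Added stand-alone sketch of the "fixed_small" posterior variance used in `step` above,
# assuming a plain linear beta schedule; the timestep and numbers are illustrative only.
import jax.numpy as jnp

num_train_timesteps = 1000
betas = jnp.linspace(1e-4, 0.02, num_train_timesteps)
alphas_cumprod = jnp.cumprod(1.0 - betas)

t = 500
alpha_prod_t = alphas_cumprod[t]
alpha_prod_t_prev = alphas_cumprod[t - 1]
# formula (7) from https://arxiv.org/pdf/2006.11239.pdf: beta_tilde_t = (1 - a_{t-1}) / (1 - a_t) * beta_t
variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * betas[t]
variance = jnp.clip(variance, a_min=1e-20)  # "fixed_small" clamps the variance away from zero
print(variance)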
| 635 | """simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class _snake_case ( __snake_case ):
"""simple docstring"""
a = "sew"
def __init__( self : List[Any] , _A : Tuple=3_2 , _A : str=7_6_8 , _A : Dict=1_2 , _A : Tuple=1_2 , _A : Optional[Any]=3_0_7_2 , _A : List[str]=2 , _A : Dict="gelu" , _A : Union[str, Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.0 , _A : str=0.1 , _A : Tuple=0.1 , _A : Optional[int]=0.02 , _A : Dict=1e-5 , _A : str="group" , _A : Tuple="gelu" , _A : Union[str, Any]=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _A : Optional[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _A : Any=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _A : Tuple=False , _A : Tuple=1_2_8 , _A : int=1_6 , _A : Union[str, Any]=True , _A : Optional[Any]=0.05 , _A : List[Any]=1_0 , _A : Union[str, Any]=2 , _A : Tuple=0.0 , _A : Union[str, Any]=1_0 , _A : Optional[int]=0 , _A : Union[str, Any]="mean" , _A : Optional[int]=False , _A : List[Any]=False , _A : int=2_5_6 , _A : str=0 , _A : Optional[int]=1 , _A : List[Any]=2 , **_A : Dict , ):
"""simple docstring"""
super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A)
_SCREAMING_SNAKE_CASE : str = hidden_size
_SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_norm
_SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_activation
_SCREAMING_SNAKE_CASE : Dict = list(_A)
_SCREAMING_SNAKE_CASE : int = list(_A)
_SCREAMING_SNAKE_CASE : int = list(_A)
_SCREAMING_SNAKE_CASE : str = conv_bias
_SCREAMING_SNAKE_CASE : Tuple = num_conv_pos_embeddings
_SCREAMING_SNAKE_CASE : List[str] = num_conv_pos_embedding_groups
_SCREAMING_SNAKE_CASE : Tuple = len(self.conv_dim)
_SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
_SCREAMING_SNAKE_CASE : List[str] = intermediate_size
_SCREAMING_SNAKE_CASE : str = squeeze_factor
_SCREAMING_SNAKE_CASE : Dict = hidden_act
_SCREAMING_SNAKE_CASE : str = num_attention_heads
_SCREAMING_SNAKE_CASE : Dict = hidden_dropout
_SCREAMING_SNAKE_CASE : Tuple = attention_dropout
_SCREAMING_SNAKE_CASE : int = activation_dropout
_SCREAMING_SNAKE_CASE : Any = feat_proj_dropout
_SCREAMING_SNAKE_CASE : str = final_dropout
_SCREAMING_SNAKE_CASE : Union[str, Any] = layerdrop
_SCREAMING_SNAKE_CASE : Any = layer_norm_eps
_SCREAMING_SNAKE_CASE : int = initializer_range
_SCREAMING_SNAKE_CASE : List[Any] = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. """
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, """
f"""but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"""
f""" = {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_SCREAMING_SNAKE_CASE : List[Any] = apply_spec_augment
_SCREAMING_SNAKE_CASE : List[Any] = mask_time_prob
_SCREAMING_SNAKE_CASE : List[str] = mask_time_length
_SCREAMING_SNAKE_CASE : List[Any] = mask_time_min_masks
_SCREAMING_SNAKE_CASE : List[Any] = mask_feature_prob
_SCREAMING_SNAKE_CASE : int = mask_feature_length
_SCREAMING_SNAKE_CASE : List[Any] = mask_feature_min_masks
# ctc loss
_SCREAMING_SNAKE_CASE : int = ctc_loss_reduction
_SCREAMING_SNAKE_CASE : Optional[int] = ctc_zero_infinity
# sequence classification
_SCREAMING_SNAKE_CASE : Dict = use_weighted_layer_sum
_SCREAMING_SNAKE_CASE : List[str] = classifier_proj_size
@property
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1)
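# Added sketch: the property above multiplies the convolutional strides together, i.e. the
# overall downsampling factor of the feature extractor. With the default stride tuple:
import functools
import operator

default_conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
print(functools.reduce(operator.mul, default_conv_stride, 1))  # 5 * 2**6 = 320 input samples per output frame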
| 635 | 1 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Tuple:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = image.size
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
_SCREAMING_SNAKE_CASE : Dict = image.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] )
_SCREAMING_SNAKE_CASE : List[str] = np.array(__SCREAMING_SNAKE_CASE ).astype(np.floataa ) / 2_55.0
_SCREAMING_SNAKE_CASE : Optional[int] = image[None].transpose(0 , 3 , 1 , 2 )
_SCREAMING_SNAKE_CASE : Optional[int] = torch.from_numpy(__SCREAMING_SNAKE_CASE )
return 2.0 * image - 1.0
class _snake_case ( __snake_case ):
"""simple docstring"""
def __init__( self : List[Any] , _A : VQModel , _A : UNetaDModel , _A : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ):
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=_A , unet=_A , scheduler=_A)
@torch.no_grad()
def __call__( self : Tuple , _A : Union[torch.Tensor, PIL.Image.Image] = None , _A : Optional[int] = 1 , _A : Optional[int] = 1_0_0 , _A : Optional[float] = 0.0 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : Optional[str] = "pil" , _A : bool = True , ):
"""simple docstring"""
if isinstance(_A , PIL.Image.Image):
_SCREAMING_SNAKE_CASE : Tuple = 1
elif isinstance(_A , torch.Tensor):
_SCREAMING_SNAKE_CASE : Any = image.shape[0]
else:
raise ValueError(f"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_A)}""")
if isinstance(_A , PIL.Image.Image):
_SCREAMING_SNAKE_CASE : Optional[Any] = preprocess(_A)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
_SCREAMING_SNAKE_CASE : List[Any] = (batch_size, self.unet.config.in_channels // 2, height, width)
_SCREAMING_SNAKE_CASE : int = next(self.unet.parameters()).dtype
_SCREAMING_SNAKE_CASE : int = randn_tensor(_A , generator=_A , device=self.device , dtype=_A)
_SCREAMING_SNAKE_CASE : List[str] = image.to(device=self.device , dtype=_A)
# set timesteps and move to the correct device
self.scheduler.set_timesteps(_A , device=self.device)
_SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
_SCREAMING_SNAKE_CASE : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_SCREAMING_SNAKE_CASE : Tuple = """eta""" in set(inspect.signature(self.scheduler.step).parameters.keys())
_SCREAMING_SNAKE_CASE : Dict = {}
if accepts_eta:
_SCREAMING_SNAKE_CASE : int = eta
for t in self.progress_bar(_A):
# concat latents and low resolution image in the channel dimension.
_SCREAMING_SNAKE_CASE : int = torch.cat([latents, image] , dim=1)
_SCREAMING_SNAKE_CASE : Tuple = self.scheduler.scale_model_input(_A , _A)
# predict the noise residual
_SCREAMING_SNAKE_CASE : Tuple = self.unet(_A , _A).sample
# compute the previous noisy sample x_t -> x_t-1
_SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.step(_A , _A , _A , **_A).prev_sample
# decode the image latents with the VQVAE
_SCREAMING_SNAKE_CASE : Optional[Any] = self.vqvae.decode(_A).sample
_SCREAMING_SNAKE_CASE : str = torch.clamp(_A , -1.0 , 1.0)
_SCREAMING_SNAKE_CASE : Any = image / 2 + 0.5
_SCREAMING_SNAKE_CASE : List[str] = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
_SCREAMING_SNAKE_CASE : Any = self.numpy_to_pil(_A)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_A)
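# Added self-contained sketch mirroring the module-level preprocess helper above: snap the
# size to a multiple of 32, scale to [0, 1], move channels first, then map to [-1, 1].
import numpy as np
import torch
from PIL import Image

dummy = Image.new("RGB", (130, 70))  # deliberately not a multiple of 32
w, h = (x - x % 32 for x in dummy.size)  # -> (128, 64)
dummy = dummy.resize((w, h), resample=Image.LANCZOS)
arr = np.array(dummy).astype(np.float32) / 255.0
tensor = 2.0 * torch.from_numpy(arr[None].transpose(0, 3, 1, 2)) - 1.0
print(tensor.shape)  # torch.Size([1, 3, 64, 128])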
| 635 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 635 | 1 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowerCAmelCase_ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-classification/requirements.txt''')
lowerCAmelCase_ = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
lowerCAmelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Tuple:
with open(__SCREAMING_SNAKE_CASE , """rb""" ) as f:
_SCREAMING_SNAKE_CASE : str = Image.open(__SCREAMING_SNAKE_CASE )
return im.convert("""RGB""" )
@dataclass
class _snake_case :
"""simple docstring"""
a = field(
default=__snake_case , metadata={
"help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
} , )
a = field(
default=__snake_case , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
a = field(default=__snake_case , metadata={"help": "A folder containing the training data."} )
a = field(default=__snake_case , metadata={"help": "A folder containing the validation data."} )
a = field(
default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
a = field(
default=__snake_case , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
a = field(
default=__snake_case , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def _lowerCAmelCase ( self : str):
"""simple docstring"""
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
"""You must specify either a dataset name from the hub or a train and/or validation directory.""")
@dataclass
class _snake_case :
"""simple docstring"""
a = field(
default="google/vit-base-patch16-224-in21k" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
a = field(
default=__snake_case , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(__snake_case )} , )
a = field(
default=__snake_case , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
a = field(
default=__snake_case , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
a = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
a = field(default=__snake_case , metadata={"help": "Name or path of preprocessor config."} )
a = field(
default=__snake_case , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
a = field(
default=__snake_case , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[int]:
_SCREAMING_SNAKE_CASE : Dict = torch.stack([example["""pixel_values"""] for example in examples] )
_SCREAMING_SNAKE_CASE : Dict = torch.tensor([example["""labels"""] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def lowerCamelCase_()-> Any:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_SCREAMING_SNAKE_CASE : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_image_classification""" , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE : str = training_args.get_process_log_level()
logger.setLevel(__SCREAMING_SNAKE_CASE )
transformers.utils.logging.set_verbosity(__SCREAMING_SNAKE_CASE )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
_SCREAMING_SNAKE_CASE : Union[str, Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_SCREAMING_SNAKE_CASE : int = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
_SCREAMING_SNAKE_CASE : Dict = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="""image-classification""" , use_auth_token=True if model_args.use_auth_token else None , )
else:
_SCREAMING_SNAKE_CASE : List[str] = {}
if data_args.train_dir is not None:
_SCREAMING_SNAKE_CASE : Tuple = os.path.join(data_args.train_dir , """**""" )
if data_args.validation_dir is not None:
_SCREAMING_SNAKE_CASE : List[str] = os.path.join(data_args.validation_dir , """**""" )
_SCREAMING_SNAKE_CASE : Optional[Any] = load_dataset(
"""imagefolder""" , data_files=__SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , task="""image-classification""" , )
# If we don't have a validation split, split off a percentage of train as validation.
_SCREAMING_SNAKE_CASE : List[Any] = None if """validation""" in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , __SCREAMING_SNAKE_CASE ) and data_args.train_val_split > 0.0:
_SCREAMING_SNAKE_CASE : Any = dataset["""train"""].train_test_split(data_args.train_val_split )
_SCREAMING_SNAKE_CASE : Dict = split["""train"""]
_SCREAMING_SNAKE_CASE : Tuple = split["""test"""]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_SCREAMING_SNAKE_CASE : int = dataset["""train"""].features["""labels"""].names
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = {}, {}
for i, label in enumerate(__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : str = str(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Optional[int] = label
# Load the accuracy metric from the datasets package
_SCREAMING_SNAKE_CASE : int = evaluate.load("""accuracy""" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(__SCREAMING_SNAKE_CASE ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
_SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(__SCREAMING_SNAKE_CASE ) , labelaid=__SCREAMING_SNAKE_CASE , idalabel=__SCREAMING_SNAKE_CASE , finetuning_task="""image-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_SCREAMING_SNAKE_CASE : Dict = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
_SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
_SCREAMING_SNAKE_CASE : Optional[Any] = image_processor.size["""shortest_edge"""]
else:
_SCREAMING_SNAKE_CASE : Dict = (image_processor.size["""height"""], image_processor.size["""width"""])
_SCREAMING_SNAKE_CASE : List[str] = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
_SCREAMING_SNAKE_CASE : List[str] = Compose(
[
RandomResizedCrop(__SCREAMING_SNAKE_CASE ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
_SCREAMING_SNAKE_CASE : Optional[int] = Compose(
[
Resize(__SCREAMING_SNAKE_CASE ),
CenterCrop(__SCREAMING_SNAKE_CASE ),
ToTensor(),
normalize,
] )
def train_transforms(__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Tuple = [
_train_transforms(pil_img.convert("""RGB""" ) ) for pil_img in example_batch["""image"""]
]
return example_batch
def val_transforms(__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = [_val_transforms(pil_img.convert("""RGB""" ) ) for pil_img in example_batch["""image"""]]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
_SCREAMING_SNAKE_CASE : str = (
dataset["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(__SCREAMING_SNAKE_CASE )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
_SCREAMING_SNAKE_CASE : Dict = (
dataset["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(__SCREAMING_SNAKE_CASE )
# Initalize our trainer
_SCREAMING_SNAKE_CASE : Optional[int] = Trainer(
model=__SCREAMING_SNAKE_CASE , args=__SCREAMING_SNAKE_CASE , train_dataset=dataset["""train"""] if training_args.do_train else None , eval_dataset=dataset["""validation"""] if training_args.do_eval else None , compute_metrics=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , data_collator=__SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
_SCREAMING_SNAKE_CASE : Union[str, Any] = None
if training_args.resume_from_checkpoint is not None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_SCREAMING_SNAKE_CASE : Dict = last_checkpoint
_SCREAMING_SNAKE_CASE : Dict = trainer.train(resume_from_checkpoint=__SCREAMING_SNAKE_CASE )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_SCREAMING_SNAKE_CASE : Dict = trainer.evaluate()
trainer.log_metrics("""eval""" , __SCREAMING_SNAKE_CASE )
trainer.save_metrics("""eval""" , __SCREAMING_SNAKE_CASE )
# Write model card and (optionally) push to hub
_SCREAMING_SNAKE_CASE : List[str] = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """image-classification""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""image-classification""", """vision"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__SCREAMING_SNAKE_CASE )
else:
trainer.create_model_card(**__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
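# Added sketch of what the collate function defined above produces, using dummy examples.
import torch

dummy_examples = [
    {"pixel_values": torch.rand(3, 224, 224), "labels": 0},
    {"pixel_values": torch.rand(3, 224, 224), "labels": 1},
]
dummy_batch = {
    "pixel_values": torch.stack([ex["pixel_values"] for ex in dummy_examples]),
    "labels": torch.tensor([ex["labels"] for ex in dummy_examples]),
}
print(dummy_batch["pixel_values"].shape, dummy_batch["labels"])  # torch.Size([2, 3, 224, 224]) tensor([0, 1])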
| 635 | """simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]:
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]:
_SCREAMING_SNAKE_CASE : List[str] = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple:
_SCREAMING_SNAKE_CASE : int = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE : str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE : List[Any] = features.copy() if features else default_expected_features
_SCREAMING_SNAKE_CASE : List[Any] = (
Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
_SCREAMING_SNAKE_CASE : Optional[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple:
_SCREAMING_SNAKE_CASE : Tuple = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE : Dict = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> str:
if issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Any = parquet_path
elif issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = [parquet_path]
_SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=("train",) )-> Union[str, Any]:
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for split in splits:
_SCREAMING_SNAKE_CASE : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]:
_SCREAMING_SNAKE_CASE : Dict = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict:
_SCREAMING_SNAKE_CASE : Optional[int] = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE : List[str] = features.copy() if features else default_expected_features
_SCREAMING_SNAKE_CASE : str = (
Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
_SCREAMING_SNAKE_CASE : int = ParquetDatasetReader({"""train""": parquet_path} , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict:
if split:
_SCREAMING_SNAKE_CASE : Union[str, Any] = {split: parquet_path}
else:
_SCREAMING_SNAKE_CASE : Optional[int] = """train"""
_SCREAMING_SNAKE_CASE : Any = {"""train""": parquet_path, """test""": parquet_path}
_SCREAMING_SNAKE_CASE : List[str] = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE : Union[str, Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]:
_SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_SCREAMING_SNAKE_CASE : Tuple = pq.ParquetFile(tmp_path / """foo.parquet""" )
_SCREAMING_SNAKE_CASE : str = pf.read()
assert dataset.data.table == output_table
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Dict = str(shared_datadir / """test_image_rgb.jpg""" )
_SCREAMING_SNAKE_CASE : Optional[Any] = {"""image""": [image_path]}
_SCREAMING_SNAKE_CASE : Optional[Any] = Features({"""image""": Image()} )
_SCREAMING_SNAKE_CASE : Optional[int] = Dataset.from_dict(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_SCREAMING_SNAKE_CASE : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
assert dataset.features == reloaded_dataset.features
_SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=__SCREAMING_SNAKE_CASE ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> int:
assert get_writer_batch_size(__SCREAMING_SNAKE_CASE ) == expected
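# Added round-trip sketch for the writer/reader pair exercised above; the temporary paths
# are placeholders and the tiny dataset is illustrative only.
import tempfile
from pathlib import Path

from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

tiny = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
with tempfile.TemporaryDirectory() as tmp_dir:
    parquet_file = str(Path(tmp_dir) / "tiny.parquet")
    assert ParquetDatasetWriter(tiny, parquet_file).write() > 0
    reloaded = ParquetDatasetReader(parquet_file, cache_dir=tmp_dir).read()
    assert reloaded.column_names == tiny.column_names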
| 635 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
lowerCAmelCase_ = 250004
lowerCAmelCase_ = 250020
@require_sentencepiece
@require_tokenizers
class _snake_case ( __snake_case , unittest.TestCase ):
"""simple docstring"""
a = MBartTokenizer
a = MBartTokenizerFast
a = True
a = True
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_SCREAMING_SNAKE_CASE : Dict = MBartTokenizer(_A , keep_accents=_A)
tokenizer.save_pretrained(self.tmpdirname)
def _lowerCAmelCase ( self : List[str]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = MBartTokenizer(_A , keep_accents=_A)
_SCREAMING_SNAKE_CASE : str = tokenizer.tokenize("""This is a test""")
self.assertListEqual(_A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
_SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_tokens_to_ids(_A)
self.assertListEqual(
_A , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
_SCREAMING_SNAKE_CASE : Dict = tokenizer.convert_ids_to_tokens(_A)
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def _lowerCAmelCase ( self : Optional[Any]):
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_SCREAMING_SNAKE_CASE : List[Any] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
_SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(_A , **_A)
_SCREAMING_SNAKE_CASE : Any = self.tokenizer_class.from_pretrained(_A , **_A)
_SCREAMING_SNAKE_CASE : List[Any] = tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE : str = tokenizer_r.save_pretrained(_A)
_SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_p.save_pretrained(_A)
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files))
_SCREAMING_SNAKE_CASE : List[Any] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f)
self.assertSequenceEqual(_A , _A)
# Checks everything loads correctly in the same way
_SCREAMING_SNAKE_CASE : Any = tokenizer_r.from_pretrained(_A)
_SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_p.from_pretrained(_A)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_A , _A))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_A)
# Save tokenizer rust, legacy_format=True
_SCREAMING_SNAKE_CASE : Optional[Any] = tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.save_pretrained(_A , legacy_format=_A)
_SCREAMING_SNAKE_CASE : List[Any] = tokenizer_p.save_pretrained(_A)
# Checks it save with the same files
self.assertSequenceEqual(_A , _A)
# Checks everything loads correctly in the same way
_SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.from_pretrained(_A)
_SCREAMING_SNAKE_CASE : Any = tokenizer_p.from_pretrained(_A)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_A , _A))
shutil.rmtree(_A)
# Save tokenizer rust, legacy_format=False
_SCREAMING_SNAKE_CASE : Optional[Any] = tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.save_pretrained(_A , legacy_format=_A)
_SCREAMING_SNAKE_CASE : Any = tokenizer_p.save_pretrained(_A)
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
_SCREAMING_SNAKE_CASE : Any = tokenizer_r.from_pretrained(_A)
_SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_p.from_pretrained(_A)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_A , _A))
shutil.rmtree(_A)
@require_torch
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
a = "facebook/mbart-large-en-ro"
a = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
a = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
a = [82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2, EN_CODE]
@classmethod
def _lowerCAmelCase ( cls : str):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""")
_SCREAMING_SNAKE_CASE : Union[str, Any] = 1
return cls
def _lowerCAmelCase ( self : Optional[int]):
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 2_5_0_0_0_1)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 2_5_0_0_0_4)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 2_5_0_0_2_0)
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _A)
def _lowerCAmelCase ( self : str):
"""simple docstring"""
self.assertIn(_A , self.tokenizer.all_special_ids)
_SCREAMING_SNAKE_CASE : Any = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer.decode(_A , skip_special_tokens=_A)
_SCREAMING_SNAKE_CASE : Any = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_A)
self.assertEqual(_A , _A)
self.assertNotIn(self.tokenizer.eos_token , _A)
def _lowerCAmelCase ( self : Optional[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = ["""this is gunna be a long sentence """ * 2_0]
assert isinstance(src_text[0] , _A)
_SCREAMING_SNAKE_CASE : Tuple = 1_0
_SCREAMING_SNAKE_CASE : int = self.tokenizer(_A , max_length=_A , truncation=_A).input_ids[0]
self.assertEqual(ids[-2] , 2)
self.assertEqual(ids[-1] , _A)
self.assertEqual(len(_A) , _A)
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""]) , [2_5_0_0_2_6, 2_5_0_0_0_1])
def _lowerCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE : Any = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_A)
_SCREAMING_SNAKE_CASE : Union[str, Any] = MBartTokenizer.from_pretrained(_A)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _A)
@require_torch
def _lowerCAmelCase ( self : List[str]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_A , return_tensors="""pt""")
_SCREAMING_SNAKE_CASE : str = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_A , truncation=_A , max_length=len(self.expected_src_tokens) , return_tensors="""pt""" , )
_SCREAMING_SNAKE_CASE : Any = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id)
self.assertIsInstance(_A , _A)
self.assertEqual((2, 1_4) , batch.input_ids.shape)
self.assertEqual((2, 1_4) , batch.attention_mask.shape)
_SCREAMING_SNAKE_CASE : Dict = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _A)
self.assertEqual(2 , batch.decoder_input_ids[0, -1]) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [])
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE])
def _lowerCAmelCase ( self : Optional[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(self.src_text , padding=_A , truncation=_A , max_length=3 , return_tensors="""pt""")
_SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(
text_target=self.tgt_text , padding=_A , truncation=_A , max_length=1_0 , return_tensors="""pt""")
_SCREAMING_SNAKE_CASE : int = targets["""input_ids"""]
_SCREAMING_SNAKE_CASE : Union[str, Any] = shift_tokens_right(_A , self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0)
@require_torch
def _lowerCAmelCase ( self : int):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""")
self.assertEqual(
nested_simplify(_A) , {
# A, test, EOS, en_XX
"""input_ids""": [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 2_5_0_0_0_1,
} , )
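# A minimal sketch of the translation-tokenization pattern the tests above exercise, assuming the
# public MBartTokenizer API; the checkpoint and sentences mirror the fixtures, but the variable
# names here are illustrative only.
from transformers import MBartTokenizer

mbart_tokenizer = MBartTokenizer.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
# Source ids end with </s> followed by the source language code (en_XX); targets prepared via
# text_target end with </s> followed by the target language code (ro_RO), as asserted above.
example_batch = mbart_tokenizer(
    [" UN Chief Says There Is No Military Solution in Syria"],
    text_target=["Şeful ONU declară că nu există o soluţie militară în Siria"],
    padding=True,
    return_tensors="pt",
)
print(example_batch.input_ids[0][-2:], example_batch.labels[0][-2:])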
| 635 | """simple docstring"""
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int:
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
raise TypeError("""only integers accepted as input""" )
else:
_SCREAMING_SNAKE_CASE : List[Any] = str(abs(__SCREAMING_SNAKE_CASE ) )
_SCREAMING_SNAKE_CASE : List[str] = [list(__SCREAMING_SNAKE_CASE ) for char in range(len(__SCREAMING_SNAKE_CASE ) )]
for index in range(len(__SCREAMING_SNAKE_CASE ) ):
num_transpositions[index].pop(__SCREAMING_SNAKE_CASE )
return max(
int("""""".join(list(__SCREAMING_SNAKE_CASE ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 635 | 1 |
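# A cleaned-up sketch of the digit-removal idea above, assuming the goal is the largest value
# obtainable by deleting exactly one digit; the function name is illustrative.
def max_with_one_digit_removed(num: int) -> int:
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    digits = str(abs(num))
    # Build every variant with one position dropped and keep the largest.
    return max(int(digits[:i] + digits[i + 1 :]) for i in range(len(digits)))

assert max_with_one_digit_removed(152) == 52   # dropping the leading 1 gives the maximum
assert max_with_one_digit_removed(-123) == 23  # the sign is ignored via abs()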
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def lowerCamelCase_(__SCREAMING_SNAKE_CASE = "isbn/0140328726" )-> dict:
_SCREAMING_SNAKE_CASE : Union[str, Any] = olid.strip().strip("""/""" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("""/""" ) != 1:
_SCREAMING_SNAKE_CASE : List[str] = F"""{olid} is not a valid Open Library olid"""
raise ValueError(__SCREAMING_SNAKE_CASE )
return requests.get(F"""https://openlibrary.org/{new_olid}.json""" ).json()
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> dict:
_SCREAMING_SNAKE_CASE : Dict = {
"""title""": """Title""",
"""publish_date""": """Publish date""",
"""authors""": """Authors""",
"""number_of_pages""": """Number of pages:""",
"""first_sentence""": """First sentence""",
"""isbn_10""": """ISBN (10)""",
"""isbn_13""": """ISBN (13)""",
}
_SCREAMING_SNAKE_CASE : Dict = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
_SCREAMING_SNAKE_CASE : int = [
get_openlibrary_data(author["""key"""] )["""name"""] for author in data["""Authors"""]
]
_SCREAMING_SNAKE_CASE : str = data["""First sentence"""]["""value"""]
for key, value in data.items():
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Any = """, """.join(__SCREAMING_SNAKE_CASE )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
lowerCAmelCase_ = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
continue
print(F"\nSearching Open Library for ISBN: {isbn}...\n")
try:
lowerCAmelCase_ = summarize_book(get_openlibrary_data(F"isbn/{isbn}"))
print('''\n'''.join(F"{key}: {value}" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"Sorry, there are no results for ISBN: {isbn}.")
| 635 | """simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : List[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
_SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A)
_SCREAMING_SNAKE_CASE : Any = -1
_SCREAMING_SNAKE_CASE : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A)
_SCREAMING_SNAKE_CASE : Dict = model.generate(_A , max_new_tokens=1_0 , do_sample=_A)
_SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(greedy_ids[0])
with CaptureStdout() as cs:
_SCREAMING_SNAKE_CASE : Any = TextStreamer(_A)
model.generate(_A , max_new_tokens=1_0 , do_sample=_A , streamer=_A)
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_SCREAMING_SNAKE_CASE : str = cs.out[:-1]
self.assertEqual(_A , _A)
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
_SCREAMING_SNAKE_CASE : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A)
_SCREAMING_SNAKE_CASE : List[Any] = -1
_SCREAMING_SNAKE_CASE : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A)
_SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(_A , max_new_tokens=1_0 , do_sample=_A)
_SCREAMING_SNAKE_CASE : Any = tokenizer.decode(greedy_ids[0])
_SCREAMING_SNAKE_CASE : List[Any] = TextIteratorStreamer(_A)
_SCREAMING_SNAKE_CASE : Any = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer}
_SCREAMING_SNAKE_CASE : List[Any] = Thread(target=model.generate , kwargs=_A)
thread.start()
_SCREAMING_SNAKE_CASE : Any = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_A , _A)
def _lowerCAmelCase ( self : List[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
_SCREAMING_SNAKE_CASE : Dict = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A)
_SCREAMING_SNAKE_CASE : Any = -1
_SCREAMING_SNAKE_CASE : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A)
_SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(_A , max_new_tokens=1_0 , do_sample=_A)
_SCREAMING_SNAKE_CASE : str = greedy_ids[:, input_ids.shape[1] :]
_SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(new_greedy_ids[0])
with CaptureStdout() as cs:
_SCREAMING_SNAKE_CASE : Any = TextStreamer(_A , skip_prompt=_A)
model.generate(_A , max_new_tokens=1_0 , do_sample=_A , streamer=_A)
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_SCREAMING_SNAKE_CASE : Optional[int] = cs.out[:-1]
self.assertEqual(_A , _A)
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("""distilgpt2""")
_SCREAMING_SNAKE_CASE : Optional[Any] = AutoModelForCausalLM.from_pretrained("""distilgpt2""").to(_A)
_SCREAMING_SNAKE_CASE : int = -1
_SCREAMING_SNAKE_CASE : List[str] = torch.ones((1, 5) , device=_A).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_SCREAMING_SNAKE_CASE : Optional[int] = TextStreamer(_A , skip_special_tokens=_A)
model.generate(_A , max_new_tokens=1 , do_sample=_A , streamer=_A)
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_SCREAMING_SNAKE_CASE : Optional[Any] = cs.out[:-1] # Remove the final "\n"
_SCREAMING_SNAKE_CASE : Tuple = tokenizer(_A , return_tensors="""pt""")
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1))
def _lowerCAmelCase ( self : str):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
_SCREAMING_SNAKE_CASE : List[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A)
_SCREAMING_SNAKE_CASE : Tuple = -1
_SCREAMING_SNAKE_CASE : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A)
_SCREAMING_SNAKE_CASE : int = TextIteratorStreamer(_A , timeout=0.001)
_SCREAMING_SNAKE_CASE : List[Any] = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer}
_SCREAMING_SNAKE_CASE : List[str] = Thread(target=model.generate , kwargs=_A)
thread.start()
        # The streamer will time out after 0.001 seconds, so an exception will be raised
with self.assertRaises(_A):
_SCREAMING_SNAKE_CASE : str = """"""
for new_text in streamer:
streamer_text += new_text
| 635 | 1 |
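# A minimal sketch of the streaming-generation pattern the tests above exercise, assuming a small
# causal LM checkpoint; "hf-internal-testing/tiny-random-gpt2" mirrors the test fixtures.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

inputs = tokenizer(["An example prompt"], return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

# generate() blocks, so it runs in a background thread while the main thread consumes the text.
thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer})
thread.start()
generated_text = "".join(new_text for new_text in streamer)
thread.join()
print(generated_text)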
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Optional[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = [[1, 2, 4], [1, 2, 3, 4]]
_SCREAMING_SNAKE_CASE : List[Any] = DisjunctiveConstraint(_A)
self.assertTrue(isinstance(dc.token_ids , _A))
with self.assertRaises(_A):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))
with self.assertRaises(_A):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
def _lowerCAmelCase ( self : int):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(_A):
DisjunctiveConstraint(_A) # fails here
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = [[1, 2, 3], [1, 2, 4]]
_SCREAMING_SNAKE_CASE : Optional[Any] = DisjunctiveConstraint(_A)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = dc.update(1)
_SCREAMING_SNAKE_CASE : Optional[int] = stepped is True and completed is False and reset is False
self.assertTrue(_A)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1])
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = dc.update(2)
_SCREAMING_SNAKE_CASE : Union[str, Any] = stepped is True and completed is False and reset is False
self.assertTrue(_A)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2])
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = dc.update(3)
_SCREAMING_SNAKE_CASE : List[Any] = stepped is True and completed is True and reset is False
self.assertTrue(_A)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3])
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
_SCREAMING_SNAKE_CASE : Dict = DisjunctiveConstraint(_A)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = dc.update(1)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1])
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = dc.update(2)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2])
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = dc.update(4)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2, 4])
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = dc.update(5)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5])
dc.reset()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = dc.update(1)
self.assertTrue(not dc.completed)
self.assertTrue(dc.remaining() == 3)
self.assertTrue(dc.current_seq == [1])
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = dc.update(2)
self.assertTrue(not dc.completed)
self.assertTrue(dc.remaining() == 2)
self.assertTrue(dc.current_seq == [1, 2])
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = dc.update(5)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.remaining() == 0)
self.assertTrue(dc.current_seq == [1, 2, 5])
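# A small sketch of the state machine the tests above walk through, using the same
# DisjunctiveConstraint API: the constraint is satisfied once any one branch is fully matched.
from transformers.generation import DisjunctiveConstraint

constraint = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4, 5]])
for token_id in (1, 2, 4, 5):
    stepped, completed, reset = constraint.update(token_id)  # same triple the tests unpack
print(constraint.completed)    # True: the [1, 2, 4, 5] branch has been matched
print(constraint.current_seq)  # [1, 2, 4, 5]
constraint.reset()             # back to the start; remaining() is the full branch length again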
| 635 | """simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _snake_case ( __snake_case ):
"""simple docstring"""
a = "facebook/bart-large-mnli"
a = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
a = "text_classifier"
a = AutoTokenizer
a = AutoModelForSequenceClassification
a = ["text", ["text"]]
a = ["text"]
def _lowerCAmelCase ( self : int):
"""simple docstring"""
super().setup()
_SCREAMING_SNAKE_CASE : Any = self.model.config
_SCREAMING_SNAKE_CASE : Any = -1
for idx, label in config.idalabel.items():
if label.lower().startswith("""entail"""):
_SCREAMING_SNAKE_CASE : List[Any] = int(_A)
if self.entailment_id == -1:
raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""")
def _lowerCAmelCase ( self : Optional[Any] , _A : Tuple , _A : List[str]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = labels
return self.pre_processor(
[text] * len(_A) , [f"""This example is {label}""" for label in labels] , return_tensors="""pt""" , padding="""max_length""" , )
def _lowerCAmelCase ( self : Tuple , _A : Optional[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = outputs.logits
_SCREAMING_SNAKE_CASE : List[Any] = torch.argmax(logits[:, 2]).item()
return self._labels[label_id]
| 635 | 1 |
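# The tool above scores candidate labels with an NLI model; a comparable result can be obtained
# with the stock zero-shot-classification pipeline, shown here as an illustrative alternative
# rather than the tool's own code path. The example text and labels are made up.
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier(
    "The new GPU doubles training throughput on large language models.",
    candidate_labels=["technology", "sports", "politics"],
)
print(result["labels"][0])  # most likely label, analogous to the tool's single-label output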
"""simple docstring"""
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[str]:
_SCREAMING_SNAKE_CASE : Any = 1
_SCREAMING_SNAKE_CASE : str = 2
while i * i <= n:
_SCREAMING_SNAKE_CASE : str = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def lowerCamelCase_()-> Optional[Any]:
_SCREAMING_SNAKE_CASE : int = 1
_SCREAMING_SNAKE_CASE : Any = 1
while True:
i += 1
t_num += i
if count_divisors(__SCREAMING_SNAKE_CASE ) > 500:
break
return t_num
if __name__ == "__main__":
print(solution())
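# A cleaned-up sketch of the idea above (Project Euler style): count divisors through the prime
# factorisation, then walk the triangular numbers until one exceeds 500 divisors. Names are
# illustrative only.
def count_divisors(n: int) -> int:
    divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors *= multiplicity + 1  # exponent e of prime i contributes a factor (e + 1)
        i += 1
    if n > 1:
        divisors *= 2  # leftover prime factor with exponent 1
    return divisors


def first_triangular_with_over_500_divisors() -> int:
    t_num, i = 1, 1
    while count_divisors(t_num) <= 500:
        i += 1
        t_num += i  # t_num is the i-th triangular number 1 + 2 + ... + i
    return t_num

assert count_divisors(28) == 6  # divisors of 28: 1, 2, 4, 7, 14, 28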
| 635 | """simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self : str):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""")
_SCREAMING_SNAKE_CASE : str = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""")
model.to(_A)
from datasets import load_dataset
_SCREAMING_SNAKE_CASE : Any = load_dataset("""nielsr/rvlcdip-demo""")
_SCREAMING_SNAKE_CASE : Any = dataset["""train"""][0]["""image"""].convert("""RGB""")
_SCREAMING_SNAKE_CASE : str = image_processor(_A , return_tensors="""pt""").to(_A)
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE : Any = model(**_A)
_SCREAMING_SNAKE_CASE : List[Any] = outputs.logits
_SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 1_6))
self.assertEqual(logits.shape , _A)
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[-0.4_158, -0.4_092, -0.4_347] , device=_A , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , _A , atol=1e-4))
| 635 | 1 |
"""simple docstring"""
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = TypeVar('''DatasetType''', Dataset, IterableDataset)
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "first_exhausted" , )-> DatasetType:
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("""Unable to interleave an empty list of datasets.""" )
for i, dataset in enumerate(__SCREAMING_SNAKE_CASE ):
if not isinstance(__SCREAMING_SNAKE_CASE , (Dataset, IterableDataset) ):
if isinstance(__SCREAMING_SNAKE_CASE , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"""is an empty dataset dictionary.""" )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(__SCREAMING_SNAKE_CASE )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__SCREAMING_SNAKE_CASE ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__SCREAMING_SNAKE_CASE ).__name__}.""" )
if i == 0:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = (
(Dataset, IterableDataset) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else (IterableDataset, Dataset)
)
elif not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , info=__SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE , stopping_strategy=__SCREAMING_SNAKE_CASE )
else:
return _interleave_iterable_datasets(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , info=__SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE , stopping_strategy=__SCREAMING_SNAKE_CASE )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 0 , )-> DatasetType:
if not dsets:
raise ValueError("""Unable to concatenate an empty list of datasets.""" )
for i, dataset in enumerate(__SCREAMING_SNAKE_CASE ):
if not isinstance(__SCREAMING_SNAKE_CASE , (Dataset, IterableDataset) ):
if isinstance(__SCREAMING_SNAKE_CASE , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"""is an empty dataset dictionary.""" )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(__SCREAMING_SNAKE_CASE )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__SCREAMING_SNAKE_CASE ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__SCREAMING_SNAKE_CASE ).__name__}.""" )
if i == 0:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = (
(Dataset, IterableDataset) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else (IterableDataset, Dataset)
)
elif not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(__SCREAMING_SNAKE_CASE , info=__SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE , axis=__SCREAMING_SNAKE_CASE )
else:
return _concatenate_iterable_datasets(__SCREAMING_SNAKE_CASE , info=__SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE , axis=__SCREAMING_SNAKE_CASE )
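# A short usage sketch for the two entry points above, assuming the public datasets API; the toy
# in-memory datasets are illustrative only.
from datasets import Dataset, concatenate_datasets, interleave_datasets

ds_a = Dataset.from_dict({"text": ["a1", "a2", "a3"]})
ds_b = Dataset.from_dict({"text": ["b1", "b2"]})

# Alternate examples from the two sources; stop as soon as the smaller one is exhausted.
mixed = interleave_datasets([ds_a, ds_b], stopping_strategy="first_exhausted")
# Row-wise concatenation (axis=0 appends rows; axis=1 would append columns instead).
combined = concatenate_datasets([ds_a, ds_b])
print(mixed["text"], combined["text"])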
| 635 | """simple docstring"""
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class _snake_case ( __snake_case ):
"""simple docstring"""
a = "M-CLIP"
def __init__( self : Optional[Any] , _A : List[str]=1_0_2_4 , _A : Union[str, Any]=7_6_8 , **_A : Optional[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = transformerDimSize
_SCREAMING_SNAKE_CASE : List[str] = imageDimSize
super().__init__(**_A)
class _snake_case ( __snake_case ):
"""simple docstring"""
a = MCLIPConfig
def __init__( self : Dict , _A : Optional[Any] , *_A : Any , **_A : Dict):
"""simple docstring"""
super().__init__(_A , *_A , **_A)
_SCREAMING_SNAKE_CASE : Tuple = XLMRobertaModel(_A)
_SCREAMING_SNAKE_CASE : List[Any] = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims)
def _lowerCAmelCase ( self : Union[str, Any] , _A : str , _A : int):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = self.transformer(input_ids=_A , attention_mask=_A)[0]
_SCREAMING_SNAKE_CASE : Optional[Any] = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
return self.LinearTransformation(_A), embs
| 635 | 1 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
class _snake_case ( __snake_case ):
"""simple docstring"""
def __init__( self : List[Any] , _A : Union[List[ControlNetModel], Tuple[ControlNetModel]]):
"""simple docstring"""
super().__init__()
_SCREAMING_SNAKE_CASE : Dict = nn.ModuleList(_A)
def _lowerCAmelCase ( self : List[Any] , _A : torch.FloatTensor , _A : Union[torch.Tensor, float, int] , _A : torch.Tensor , _A : List[torch.tensor] , _A : List[float] , _A : Optional[torch.Tensor] = None , _A : Optional[torch.Tensor] = None , _A : Optional[torch.Tensor] = None , _A : Optional[Dict[str, Any]] = None , _A : bool = False , _A : bool = True , ):
"""simple docstring"""
for i, (image, scale, controlnet) in enumerate(zip(_A , _A , self.nets)):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = controlnet(
_A , _A , _A , _A , _A , _A , _A , _A , _A , _A , _A , )
# merge samples
if i == 0:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = down_samples, mid_sample
else:
_SCREAMING_SNAKE_CASE : int = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(_A , _A)
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def _lowerCAmelCase ( self : Any , _A : Union[str, os.PathLike] , _A : bool = True , _A : Callable = None , _A : bool = False , _A : Optional[str] = None , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = 0
_SCREAMING_SNAKE_CASE : int = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
_A , is_main_process=_A , save_function=_A , safe_serialization=_A , variant=_A , )
idx += 1
_SCREAMING_SNAKE_CASE : int = model_path_to_save + f"""_{idx}"""
@classmethod
def _lowerCAmelCase ( cls : Optional[Any] , _A : Optional[Union[str, os.PathLike]] , **_A : Optional[int]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = 0
_SCREAMING_SNAKE_CASE : Optional[int] = []
# load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
_SCREAMING_SNAKE_CASE : Optional[int] = pretrained_model_path
while os.path.isdir(_A):
_SCREAMING_SNAKE_CASE : List[str] = ControlNetModel.from_pretrained(_A , **_A)
controlnets.append(_A)
idx += 1
_SCREAMING_SNAKE_CASE : Optional[Any] = pretrained_model_path + f"""_{idx}"""
logger.info(f"""{len(_A)} controlnets loaded from {pretrained_model_path}.""")
if len(_A) == 0:
raise ValueError(
f"""No ControlNets found under {os.path.dirname(_A)}. Expected at least {pretrained_model_path + "_0"}.""")
return cls(_A)
| 635 | """simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> str:
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
raise TypeError("""Undefined for non-integers""" )
elif precision < 1:
raise ValueError("""Undefined for non-natural numbers""" )
_SCREAMING_SNAKE_CASE : int = precision
_SCREAMING_SNAKE_CASE : Dict = ceil(precision / 14 )
_SCREAMING_SNAKE_CASE : int = 426_880 * Decimal(10_005 ).sqrt()
_SCREAMING_SNAKE_CASE : Union[str, Any] = 1
_SCREAMING_SNAKE_CASE : str = 13_591_409
_SCREAMING_SNAKE_CASE : Tuple = Decimal(__SCREAMING_SNAKE_CASE )
for k in range(1 , __SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Tuple = factorial(6 * k ) // (factorial(3 * k ) * factorial(__SCREAMING_SNAKE_CASE ) ** 3)
linear_term += 545_140_134
exponential_term *= -262_537_412_640_768_000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
lowerCAmelCase_ = 50
print(F"The first {n} digits of pi is: {pi(n)}")
| 635 | 1 |
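The routine above follows the Chudnovsky series, which (stated here as a standard reference, not taken from the snippet itself) reads

$$
\frac{1}{\pi} \;=\; 12 \sum_{k=0}^{\infty} \frac{(-1)^{k}\,(6k)!\,\bigl(13591409 + 545140134\,k\bigr)}{(3k)!\,(k!)^{3}\,640320^{3k+3/2}} .
$$

Since \(640320^{3/2} = 12 \cdot 426880\sqrt{10005}\), the code's constant_term is \(426880\sqrt{10005}\) and \(\pi\) is that constant divided by the partial sum (note \(640320^{3} = 262537412640768000\), the factor applied to exponential_term each iteration). Each term contributes roughly 14 correct digits, which is why the loop runs ceil(precision / 14) times.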