code stringlengths 81 54k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : List[Any] = """▁"""
SCREAMING_SNAKE_CASE__ : Tuple = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__ : List[str] = {
"""vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""},
"""tokenizer_file""": {
"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"""
},
}
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"""google/pegasus-xsum""": 5_1_2,
}
class __lowerCAmelCase ( _UpperCamelCase ):
_UpperCamelCase : Union[str, Any] = VOCAB_FILES_NAMES
_UpperCamelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Dict = PegasusTokenizer
_UpperCamelCase : Union[str, Any] = ["""input_ids""", """attention_mask"""]
def __init__( self , snake_case=None , snake_case=None , snake_case="<pad>" , snake_case="</s>" , snake_case="<unk>" , snake_case="<mask_2>" , snake_case="<mask_1>" , snake_case=None , snake_case=103 , **snake_case , ) -> List[str]:
"""simple docstring"""
a__ : Optional[Any] = offset
if additional_special_tokens is not None:
if not isinstance(snake_case , snake_case ):
raise TypeError(
F"""additional_special_tokens should be of type {type(snake_case )}, but is"""
F""" {type(snake_case )}""" )
a__ : Optional[Any] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F"""<unk_{i}>""" for i in range(len(snake_case ) , self.offset - 1 )
]
if len(set(snake_case ) ) != len(snake_case ):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
F""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
a__ : List[str] = additional_special_tokens_extended
else:
a__ : Tuple = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F"""<unk_{i}>""" for i in range(2 , self.offset )]
super().__init__(
snake_case , tokenizer_file=snake_case , pad_token=snake_case , eos_token=snake_case , unk_token=snake_case , mask_token=snake_case , mask_token_sent=snake_case , offset=snake_case , additional_special_tokens=snake_case , **snake_case , )
a__ : int = vocab_file
a__ : Union[str, Any] = False if not self.vocab_file else True
def _snake_case ( self , snake_case ) -> Tuple:
"""simple docstring"""
a__ : Tuple = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
"There should be 3 special tokens: mask_token, pad_token, and eos_token +"
F""" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}""" )
return [1 if x in all_special_ids else 0 for x in seq]
def _snake_case ( self , snake_case , snake_case = None , snake_case = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(snake_case )
elif token_ids_a is None:
return self._special_token_mask(snake_case ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def _snake_case ( self , snake_case , snake_case=None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _snake_case ( self , snake_case , snake_case = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(snake_case ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
a__ : Any = os.path.join(
snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ):
copyfile(self.vocab_file , snake_case )
return (out_vocab_file,)
| 629 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
SCREAMING_SNAKE_CASE__ : List[str] = {
"""tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
"""tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
"""base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
"""base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
"""small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
"""small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
"""medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
"""medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
"""large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
"""large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def _A ( lowerCamelCase ):
a__ : Optional[int] = ["layers", "blocks"]
for k in ignore_keys:
state_dict.pop(lowerCamelCase , lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = {
"""blocks""": """layers""",
"""mlp.0""": """fc1""",
"""mlp.2""": """fc2""",
"""mlp_ln""": """final_layer_norm""",
""".attn.query""": """.self_attn.q_proj""",
""".attn.key""": """.self_attn.k_proj""",
""".attn.value""": """.self_attn.v_proj""",
""".attn_ln""": """.self_attn_layer_norm""",
""".attn.out""": """.self_attn.out_proj""",
""".cross_attn.query""": """.encoder_attn.q_proj""",
""".cross_attn.key""": """.encoder_attn.k_proj""",
""".cross_attn.value""": """.encoder_attn.v_proj""",
""".cross_attn_ln""": """.encoder_attn_layer_norm""",
""".cross_attn.out""": """.encoder_attn.out_proj""",
"""decoder.ln.""": """decoder.layer_norm.""",
"""encoder.ln.""": """encoder.layer_norm.""",
"""token_embedding""": """embed_tokens""",
"""encoder.positional_embedding""": """encoder.embed_positions.weight""",
"""decoder.positional_embedding""": """decoder.embed_positions.weight""",
"""ln_post""": """layer_norm""",
}
def _A ( lowerCamelCase ):
a__ : Tuple = list(s_dict.keys() )
for key in keys:
a__ : Optional[Any] = key
for k, v in WHISPER_MAPPING.items():
if k in key:
a__ : Optional[int] = new_key.replace(lowerCamelCase , lowerCamelCase )
print(F"""{key} -> {new_key}""" )
a__ : Dict = s_dict.pop(lowerCamelCase )
return s_dict
def _A ( lowerCamelCase ):
a__ , a__ : Any = emb.weight.shape
a__ : Optional[Any] = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase )
a__ : Optional[Any] = emb.weight.data
return lin_layer
def _A ( lowerCamelCase , lowerCamelCase ):
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
a__ : Optional[Any] = os.path.basename(lowerCamelCase )
a__ : List[Any] = url.split("/" )[-2]
a__ : Tuple = os.path.join(lowerCamelCase , lowerCamelCase )
if os.path.exists(lowerCamelCase ) and not os.path.isfile(lowerCamelCase ):
raise RuntimeError(F"""{download_target} exists and is not a regular file""" )
if os.path.isfile(lowerCamelCase ):
a__ : Any = open(lowerCamelCase , "rb" ).read()
if hashlib.shaaaa(lowerCamelCase ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(F"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
with urllib.request.urlopen(lowerCamelCase ) as source, open(lowerCamelCase , "wb" ) as output:
with tqdm(
total=int(source.info().get("Content-Length" ) ) , ncols=80 , unit="iB" , unit_scale=lowerCamelCase , unit_divisor=1024 ) as loop:
while True:
a__ : Optional[Any] = source.read(8192 )
if not buffer:
break
output.write(lowerCamelCase )
loop.update(len(lowerCamelCase ) )
a__ : Optional[int] = open(lowerCamelCase , "rb" ).read()
if hashlib.shaaaa(lowerCamelCase ).hexdigest() != expected_shaaaa:
raise RuntimeError(
"Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model." )
return model_bytes
def _A ( lowerCamelCase , lowerCamelCase ):
if ".pt" not in checkpoint_path:
a__ : str = _download(_MODELS[checkpoint_path] )
else:
a__ : str = torch.load(lowerCamelCase , map_location="cpu" )
a__ : Dict = original_checkpoint["dims"]
a__ : Optional[int] = original_checkpoint["model_state_dict"]
a__ : Any = state_dict["decoder.token_embedding.weight"]
remove_ignore_keys_(lowerCamelCase )
rename_keys(lowerCamelCase )
a__ : Optional[Any] = True
a__ : Optional[Any] = state_dict["decoder.layers.0.fc1.weight"].shape[0]
a__ : Tuple = WhisperConfig(
vocab_size=dimensions["n_vocab"] , encoder_ffn_dim=lowerCamelCase , decoder_ffn_dim=lowerCamelCase , num_mel_bins=dimensions["n_mels"] , d_model=dimensions["n_audio_state"] , max_target_positions=dimensions["n_text_ctx"] , encoder_layers=dimensions["n_audio_layer"] , encoder_attention_heads=dimensions["n_audio_head"] , decoder_layers=dimensions["n_text_layer"] , decoder_attention_heads=dimensions["n_text_state"] , max_source_positions=dimensions["n_audio_ctx"] , )
a__ : Optional[Any] = WhisperForConditionalGeneration(lowerCamelCase )
a__ , a__ : Tuple = model.model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
if len(lowerCamelCase ) > 0 and not set(lowerCamelCase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
F""" but all the following weights are missing {missing}""" )
if tie_embeds:
a__ : Any = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
a__ : str = proj_out_weights
model.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Tuple = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Patht to the downloaded checkpoints""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 629 | 1 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : str = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class __lowerCAmelCase ( _UpperCamelCase ):
_UpperCamelCase : Optional[Any] = """informer"""
_UpperCamelCase : Any = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self , snake_case = None , snake_case = None , snake_case = "student_t" , snake_case = "nll" , snake_case = 1 , snake_case = None , snake_case = "mean" , snake_case = 0 , snake_case = 0 , snake_case = 0 , snake_case = 0 , snake_case = None , snake_case = None , snake_case = 64 , snake_case = 32 , snake_case = 32 , snake_case = 2 , snake_case = 2 , snake_case = 2 , snake_case = 2 , snake_case = True , snake_case = "gelu" , snake_case = 0.05 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 100 , snake_case = 0.02 , snake_case=True , snake_case = "prob" , snake_case = 5 , snake_case = True , **snake_case , ) -> Union[str, Any]:
"""simple docstring"""
a__ : Optional[Any] = prediction_length
a__ : Optional[int] = context_length or prediction_length
a__ : Optional[int] = distribution_output
a__ : str = loss
a__ : Optional[Any] = input_size
a__ : int = num_time_features
a__ : Optional[int] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
a__ : Optional[int] = scaling
a__ : List[str] = num_dynamic_real_features
a__ : Optional[int] = num_static_real_features
a__ : Optional[int] = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(snake_case ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
a__ : List[Any] = cardinality
else:
a__ : Tuple = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(snake_case ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
a__ : Tuple = embedding_dimension
else:
a__ : int = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
a__ : Optional[Any] = num_parallel_samples
# Transformer architecture configuration
a__ : int = input_size * len(self.lags_sequence ) + self._number_of_features
a__ : Union[str, Any] = d_model
a__ : Any = encoder_attention_heads
a__ : Optional[Any] = decoder_attention_heads
a__ : int = encoder_ffn_dim
a__ : List[Any] = decoder_ffn_dim
a__ : List[str] = encoder_layers
a__ : Any = decoder_layers
a__ : List[str] = dropout
a__ : int = attention_dropout
a__ : List[Any] = activation_dropout
a__ : Optional[int] = encoder_layerdrop
a__ : Tuple = decoder_layerdrop
a__ : Any = activation_function
a__ : Tuple = init_std
a__ : Optional[int] = use_cache
# Informer
a__ : Union[str, Any] = attention_type
a__ : List[str] = sampling_factor
a__ : Optional[int] = distil
super().__init__(is_encoder_decoder=snake_case , **snake_case )
@property
def _snake_case ( self ) -> int:
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 629 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : str = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class __lowerCAmelCase ( _UpperCamelCase ):
_UpperCamelCase : Optional[Any] = """informer"""
_UpperCamelCase : Any = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self , snake_case = None , snake_case = None , snake_case = "student_t" , snake_case = "nll" , snake_case = 1 , snake_case = None , snake_case = "mean" , snake_case = 0 , snake_case = 0 , snake_case = 0 , snake_case = 0 , snake_case = None , snake_case = None , snake_case = 64 , snake_case = 32 , snake_case = 32 , snake_case = 2 , snake_case = 2 , snake_case = 2 , snake_case = 2 , snake_case = True , snake_case = "gelu" , snake_case = 0.05 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 100 , snake_case = 0.02 , snake_case=True , snake_case = "prob" , snake_case = 5 , snake_case = True , **snake_case , ) -> Union[str, Any]:
"""simple docstring"""
a__ : Optional[Any] = prediction_length
a__ : Optional[int] = context_length or prediction_length
a__ : Optional[int] = distribution_output
a__ : str = loss
a__ : Optional[Any] = input_size
a__ : int = num_time_features
a__ : Optional[int] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
a__ : Optional[int] = scaling
a__ : List[str] = num_dynamic_real_features
a__ : Optional[int] = num_static_real_features
a__ : Optional[int] = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(snake_case ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
a__ : List[Any] = cardinality
else:
a__ : Tuple = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(snake_case ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
a__ : Tuple = embedding_dimension
else:
a__ : int = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
a__ : Optional[Any] = num_parallel_samples
# Transformer architecture configuration
a__ : int = input_size * len(self.lags_sequence ) + self._number_of_features
a__ : Union[str, Any] = d_model
a__ : Any = encoder_attention_heads
a__ : Optional[Any] = decoder_attention_heads
a__ : int = encoder_ffn_dim
a__ : List[Any] = decoder_ffn_dim
a__ : List[str] = encoder_layers
a__ : Any = decoder_layers
a__ : List[str] = dropout
a__ : int = attention_dropout
a__ : List[Any] = activation_dropout
a__ : Optional[int] = encoder_layerdrop
a__ : Tuple = decoder_layerdrop
a__ : Any = activation_function
a__ : Tuple = init_std
a__ : Optional[int] = use_cache
# Informer
a__ : Union[str, Any] = attention_type
a__ : List[str] = sampling_factor
a__ : Optional[int] = distil
super().__init__(is_encoder_decoder=snake_case , **snake_case )
@property
def _snake_case ( self ) -> int:
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 629 | 1 |
def _A ( lowerCamelCase , lowerCamelCase ):
a__ : Optional[Any] = [0 for i in range(r + 1 )]
# nc0 = 1
a__ : List[Any] = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
a__ : Dict = min(lowerCamelCase , lowerCamelCase )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=1_0, r=5))
| 629 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
SCREAMING_SNAKE_CASE__ : List[Any] = """sshleifer/bart-tiny-random"""
SCREAMING_SNAKE_CASE__ : Tuple = """patrickvonplaten/t5-tiny-random"""
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _snake_case ( self ) -> List[Any]:
"""simple docstring"""
return AutoConfig.from_pretrained(snake_case )
def _snake_case ( self ) -> Any:
"""simple docstring"""
a__ , *a__ : int = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def _snake_case ( self ) -> str:
"""simple docstring"""
a__ , *a__ : int = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=snake_case )
def _snake_case ( self ) -> List[str]:
"""simple docstring"""
a__ , *a__ : Any = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=snake_case )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def _snake_case ( self ) -> Optional[int]:
"""simple docstring"""
a__ , *a__ : List[str] = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def _snake_case ( self ) -> int:
"""simple docstring"""
with self.assertRaises(snake_case ):
create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=snake_case , d=snake_case )
| 629 | 1 |
def _A ( lowerCamelCase ):
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise ValueError("Input series is not valid, valid series - [2, 4, 6]" )
if len(lowerCamelCase ) == 0:
raise ValueError("Input list must be a non empty list" )
if len(lowerCamelCase ) == 1:
return True
a__ : str = series[1] - series[0]
for index in range(len(lowerCamelCase ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
def _A ( lowerCamelCase ):
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise ValueError("Input series is not valid, valid series - [2, 4, 6]" )
if len(lowerCamelCase ) == 0:
raise ValueError("Input list must be a non empty list" )
a__ : Optional[int] = 0
for val in series:
answer += val
return answer / len(lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 629 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
SCREAMING_SNAKE_CASE__ : Dict = """CompVis/stable-diffusion-v1-1"""
SCREAMING_SNAKE_CASE__ : Dict = """CompVis/stable-diffusion-v1-2"""
SCREAMING_SNAKE_CASE__ : Tuple = """CompVis/stable-diffusion-v1-3"""
SCREAMING_SNAKE_CASE__ : str = """CompVis/stable-diffusion-v1-4"""
class __lowerCAmelCase ( _UpperCamelCase ):
def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case = True , ) -> Any:
"""simple docstring"""
super()._init_()
a__ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(snake_case )
a__ : Optional[Any] = StableDiffusionPipeline.from_pretrained(snake_case )
a__ : Optional[Any] = StableDiffusionPipeline.from_pretrained(snake_case )
a__ : int = StableDiffusionPipeline(
vae=snake_case , text_encoder=snake_case , tokenizer=snake_case , unet=snake_case , scheduler=snake_case , safety_checker=snake_case , feature_extractor=snake_case , requires_safety_checker=snake_case , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def _snake_case ( self ) -> Dict[str, Any]:
"""simple docstring"""
return {k: getattr(self , snake_case ) for k in self.config.keys() if not k.startswith("_" )}
def _snake_case ( self , snake_case = "auto" ) -> Optional[Any]:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
a__ : List[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(snake_case )
def _snake_case ( self ) -> Tuple:
"""simple docstring"""
self.enable_attention_slicing(snake_case )
@torch.no_grad()
def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Optional[int]:
"""simple docstring"""
return self.pipea(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
@torch.no_grad()
def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict:
"""simple docstring"""
return self.pipea(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
@torch.no_grad()
def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Union[str, Any]:
"""simple docstring"""
return self.pipea(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
@torch.no_grad()
def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict:
"""simple docstring"""
return self.pipea(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
@torch.no_grad()
def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict:
"""simple docstring"""
a__ : Any = "cuda" if torch.cuda.is_available() else "cpu"
self.to(snake_case )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
a__ : Any = self.textaimg_sda_a(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
# Get first result from Stable Diffusion Checkpoint v1.2
a__ : List[Any] = self.textaimg_sda_a(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
# Get first result from Stable Diffusion Checkpoint v1.3
a__ : Optional[Any] = self.textaimg_sda_a(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
# Get first result from Stable Diffusion Checkpoint v1.4
a__ : Dict = self.textaimg_sda_a(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 629 | 1 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class __lowerCAmelCase :
    """Builds the config and dummy tensors shared by the TF ESM model tests.

    NOTE(review): this block looks machine-transformed — every helper is named
    ``_snake_case``, several signatures repeat the parameter name ``snake_case``
    (a SyntaxError in Python), and results are repeatedly bound to the single
    name ``a__`` while later lines read the intended names (``parent``,
    ``config_and_inputs`` …), which are undefined here.  Verify against the
    untransformed source before relying on behavior.
    """
    def __init__( self , snake_case , ) -> str:
        """Record the owning test case and the fixed test hyper-parameters."""
        # NOTE(review): ``parent`` is undefined here — presumably the
        # ``snake_case`` argument was meant.
        a__ : Optional[Any] = parent
        a__ : Union[str, Any] = 13
        a__ : Any = 7
        a__ : Any = True
        a__ : Optional[int] = True
        a__ : int = True
        a__ : List[Any] = 99
        a__ : int = 32
        a__ : int = 2
        a__ : Any = 4
        a__ : Optional[int] = 37
        a__ : Tuple = "gelu"
        a__ : Optional[int] = 0.1
        a__ : List[str] = 0.1
        a__ : str = 512
        a__ : List[str] = 16
        a__ : List[Any] = 2
        a__ : Tuple = 0.02
        a__ : Any = 3
        a__ : int = 4
        a__ : Union[str, Any] = None
    def _snake_case ( self ) -> Any:
        """Create an EsmConfig plus random input id / mask / label tensors."""
        a__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        a__ : Tuple = None
        if self.use_input_mask:
            a__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
        a__ : Tuple = None
        a__ : int = None
        a__ : int = None
        if self.use_labels:
            a__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            a__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            a__ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
        a__ : str = EsmConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def _snake_case ( self ) -> Optional[int]:
        """Extend the base inputs with encoder hidden states / mask for decoder tests."""
        # NOTE(review): every element below unpacks onto the same name ``a__``;
        # the names returned further down are undefined at this point.
        (
            (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) ,
        ) : Optional[int] = self.prepare_config_and_inputs()
        a__ : Union[str, Any] = True
        a__ : Optional[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        a__ : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Optional[Any]:
        """Instantiate TFEsmModel (dict, list and positional calls) and check the last-hidden-state shape."""
        a__ : int = TFEsmModel(config=snake_case )
        a__ : Any = {"input_ids": input_ids, "attention_mask": input_mask}
        a__ : Optional[int] = model(snake_case )
        a__ : List[Any] = [input_ids, input_mask]
        a__ : Optional[int] = model(snake_case )
        a__ : Tuple = model(snake_case )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ) -> List[Any]:
        """Run TFEsmModel as a decoder, with and without encoder outputs."""
        a__ : Optional[int] = True
        a__ : Union[str, Any] = TFEsmModel(config=snake_case )
        a__ : int = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        a__ : Optional[int] = model(snake_case )
        a__ : Union[str, Any] = [input_ids, input_mask]
        a__ : Dict = model(snake_case , encoder_hidden_states=snake_case )
        # Also check the case where encoder outputs are not passed
        a__ : Union[str, Any] = model(snake_case , attention_mask=snake_case )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> str:
        """Check TFEsmForMaskedLM logits shape (vocab-sized per token)."""
        a__ : Tuple = TFEsmForMaskedLM(config=snake_case )
        a__ : int = model([input_ids, input_mask] )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> str:
        """Check TFEsmForTokenClassification logits shape (num_labels per token)."""
        a__ : Union[str, Any] = self.num_labels
        a__ : Tuple = TFEsmForTokenClassification(config=snake_case )
        a__ : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask}
        a__ : List[Any] = model(snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def _snake_case ( self ) -> str:
        """Return (config, inputs_dict) for the common model tests."""
        a__ : Dict = self.prepare_config_and_inputs()
        # NOTE(review): ``config_and_inputs`` below is undefined — the call
        # result above was bound to ``a__`` instead.
        (
            (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) ,
        ) : int = config_and_inputs
        a__ : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class __lowerCAmelCase ( _UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
    """Common ModelTester/PipelineTester suite for the TF ESM models.

    NOTE(review): machine-transformed — every test method shares the name
    ``_snake_case`` (later defs shadow earlier ones) and helper names such as
    ``TFEsmModelTester`` are not defined under that name in this module;
    verify against the untransformed source.
    """
    # Model classes exercised by the common tests (empty when TF is absent).
    _UpperCamelCase : Any = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline-task → model mapping used by the pipeline mixin.
    _UpperCamelCase : Tuple = (
        {
            """feature-extraction""": TFEsmModel,
            """fill-mask""": TFEsmForMaskedLM,
            """text-classification""": TFEsmForSequenceClassification,
            """token-classification""": TFEsmForTokenClassification,
            """zero-shot""": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    _UpperCamelCase : Optional[int] = False
    _UpperCamelCase : Optional[int] = False
    def _snake_case ( self ) -> int:
        """Set up the model tester and the config tester."""
        a__ : Optional[int] = TFEsmModelTester(self )
        a__ : Dict = ConfigTester(self , config_class=snake_case , hidden_size=37 )
    def _snake_case ( self ) -> List[Any]:
        """Run the shared config tests."""
        self.config_tester.run_common_tests()
    def _snake_case ( self ) -> Any:
        """Smoke-test the base model forward shapes."""
        a__ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case )
    def _snake_case ( self ) -> Any:
        """Smoke-test the decoder configuration."""
        a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*snake_case )
    def _snake_case ( self ) -> List[Any]:
        """Smoke-test the masked-LM head."""
        a__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*snake_case )
    def _snake_case ( self ) -> List[str]:
        """Smoke-test the token-classification head."""
        a__ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*snake_case )
    @slow
    def _snake_case ( self ) -> Optional[int]:
        """Load a pretrained checkpoint and make sure it instantiates."""
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a__ : Tuple = TFEsmModel.from_pretrained(snake_case )
            self.assertIsNotNone(snake_case )
    @unittest.skip("Protein models do not support embedding resizing." )
    def _snake_case ( self ) -> int:
        """Skipped: embedding resizing is unsupported for protein models."""
        pass
    @unittest.skip("Protein models do not support embedding resizing." )
    def _snake_case ( self ) -> Any:
        """Skipped: embedding resizing is unsupported for protein models."""
        pass
    def _snake_case ( self ) -> Tuple:
        """Check input embeddings are Keras layers; for the LM head, check the bias dict."""
        a__ , a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a__ : Optional[int] = model_class(snake_case )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                a__ : Any = model.get_bias()
                assert isinstance(snake_case , snake_case )
                for k, v in name.items():
                    assert isinstance(snake_case , tf.Variable )
            else:
                a__ : Optional[int] = model.get_output_embeddings()
                assert x is None
                a__ : Union[str, Any] = model.get_bias()
                assert name is None
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
    """Slow integration tests comparing TF ESM outputs against reference slices.

    NOTE(review): machine-transformed — both tests share the method name
    ``_snake_case`` and several lines read names (``output``, ``expected_slice``)
    whose assignments were rebound to ``a__``; verify against the
    untransformed source.
    """
    @slow
    def _snake_case ( self ) -> int:
        """Masked-LM head: check logits shape and a 3x3 slice of values."""
        a__ : Dict = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
        a__ : Optional[int] = tf.constant([[0, 1, 2, 3, 4, 5]] )
        a__ : List[str] = model(snake_case )[0]
        a__ : Dict = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape ) , snake_case )
        # compare the actual values for a slice.
        a__ : Tuple = tf.constant(
            [
                [
                    [8.921_518, -10.589_814, -6.4_671_307],
                    [-6.3_967_156, -13.911_377, -1.1_211_915],
                    [-7.781_247, -13.951_557, -3.740_592],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
    @slow
    def _snake_case ( self ) -> str:
        """Base model: check a 3x3 slice of the last hidden state."""
        a__ : str = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
        a__ : List[str] = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        a__ : Optional[Any] = model(snake_case )[0]
        # compare the actual values for a slice.
        a__ : Any = tf.constant(
            [
                [
                    [0.14_443_092, 0.54_125_327, 0.3_247_739],
                    [0.30_340_484, 0.00_526_676, 0.31_077_722],
                    [0.32_278_043, -0.24_987_096, 0.3_414_628],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 629 |
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 9.80665
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase = g ):
if fluid_density <= 0:
raise ValueError("Impossible fluid density" )
if volume < 0:
raise ValueError("Impossible Object volume" )
if gravity <= 0:
raise ValueError("Impossible Gravity" )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 629 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
def __init__( self , snake_case , snake_case=7 , snake_case=3 , snake_case=18 , snake_case=30 , snake_case=400 , snake_case=True , snake_case=None , snake_case=True , snake_case=None , snake_case=True , snake_case=[0.5, 0.5, 0.5] , snake_case=[0.5, 0.5, 0.5] , ) -> List[str]:
"""simple docstring"""
a__ : List[Any] = size if size is not None else {"shortest_edge": 18}
a__ : Any = crop_size if crop_size is not None else {"height": 18, "width": 18}
a__ : str = parent
a__ : Optional[int] = batch_size
a__ : List[str] = num_channels
a__ : Optional[Any] = image_size
a__ : Dict = min_resolution
a__ : Any = max_resolution
a__ : Optional[Any] = do_resize
a__ : Dict = size
a__ : int = do_center_crop
a__ : Tuple = crop_size
a__ : str = do_normalize
a__ : Any = image_mean
a__ : Tuple = image_std
def _snake_case ( self ) -> Optional[Any]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __lowerCAmelCase ( _UpperCamelCase ,unittest.TestCase ):
    """Image-processing test suite for ``LevitImageProcessor``.

    NOTE(review): machine-transformed — every test method shares the name
    ``_snake_case`` (later defs shadow earlier ones) and
    ``LevitImageProcessingTester`` is not defined under that name in this
    module; verify against the untransformed source.
    """
    # Class under test (None when torchvision/PIL are unavailable).
    _UpperCamelCase : Any = LevitImageProcessor if is_vision_available() else None
    def _snake_case ( self ) -> Dict:
        """Set up the shared image-processing tester."""
        a__ : Optional[Any] = LevitImageProcessingTester(self )
    @property
    def _snake_case ( self ) -> Any:
        """kwargs dict used to build the image processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()
    def _snake_case ( self ) -> Dict:
        """The processor exposes all expected config attributes."""
        a__ : Dict = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(snake_case , "image_mean" ) )
        self.assertTrue(hasattr(snake_case , "image_std" ) )
        self.assertTrue(hasattr(snake_case , "do_normalize" ) )
        self.assertTrue(hasattr(snake_case , "do_resize" ) )
        self.assertTrue(hasattr(snake_case , "do_center_crop" ) )
        self.assertTrue(hasattr(snake_case , "size" ) )
    def _snake_case ( self ) -> Any:
        """from_dict honors ``size``/``crop_size`` overrides."""
        a__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18} )
        self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
        a__ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"shortest_edge": 42} )
        self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
    def _snake_case ( self ) -> List[Any]:
        """Intentionally empty placeholder."""
        pass
    def _snake_case ( self ) -> Union[str, Any]:
        """PIL inputs: check single and batched output tensor shapes."""
        a__ : Any = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        a__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case )
        for image in image_inputs:
            self.assertIsInstance(snake_case , Image.Image )
        # Test not batched input
        a__ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        a__ : List[Any] = image_processing(snake_case , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
    def _snake_case ( self ) -> Union[str, Any]:
        """NumPy inputs: check single and batched output tensor shapes."""
        a__ : Tuple = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        a__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , numpify=snake_case )
        for image in image_inputs:
            self.assertIsInstance(snake_case , np.ndarray )
        # Test not batched input
        a__ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        a__ : str = image_processing(snake_case , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
    def _snake_case ( self ) -> Dict:
        """Torch inputs: check single and batched output tensor shapes."""
        a__ : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        a__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , torchify=snake_case )
        for image in image_inputs:
            self.assertIsInstance(snake_case , torch.Tensor )
        # Test not batched input
        a__ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        a__ : Dict = image_processing(snake_case , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
| 629 |
from __future__ import annotations
from random import random
class __lowerCAmelCase :
def __init__( self , snake_case = None ) -> Any:
"""simple docstring"""
a__ : Optional[int] = value
a__ : Tuple = random()
a__ : Node | None = None
a__ : Node | None = None
def __repr__( self ) -> str:
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return F"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self ) -> str:
"""simple docstring"""
a__ : List[Any] = str(self.value ) + " "
a__ : List[str] = str(self.left or "" )
a__ : Tuple = str(self.right or "" )
return value + left + right
def _A ( lowerCamelCase , lowerCamelCase ):
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
a__ , a__ : Dict = split(root.left , lowerCamelCase )
return left, root
else:
a__ , a__ : int = split(root.right , lowerCamelCase )
return root, right
def _A ( lowerCamelCase , lowerCamelCase ):
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
a__ : List[Any] = merge(left.right , lowerCamelCase )
return left
else:
a__ : int = merge(lowerCamelCase , right.left )
return right
def _A ( root , value ):
    """Insert ``value`` into the treap rooted at ``root`` and return the new root.

    Fix: the transformed original declared both parameters with one repeated
    name (a SyntaxError); the split/merge arguments are now unambiguous.

    NOTE(review): ``Node``, ``split`` and ``merge`` are not defined under those
    names in this module — confirm the intended bindings against the
    untransformed source.
    """
    node = Node(value )
    left, right = split(root , value )
    return merge(merge(left , node ) , right )
def _A ( root , value ):
    """Erase every node holding ``value`` from the treap and return the new root.

    Splits out the half-open band [value-1, value] that contains the target
    nodes and merges the remainder back together.

    Fix: the transformed original declared both parameters with one repeated
    name (a SyntaxError).

    NOTE(review): ``split`` and ``merge`` are not defined under those names in
    this module — confirm the intended bindings against the untransformed
    source.
    """
    left, right = split(root , value - 1 )
    _, right = split(right , value )
    return merge(left , right )
def _A ( lowerCamelCase ):
if not root: # None
return
else:
inorder(root.left )
print(root.value , end="," )
inorder(root.right )
def _A ( lowerCamelCase , lowerCamelCase ):
for arg in args.split():
if arg[0] == "+":
a__ : int = insert(lowerCamelCase , int(arg[1:] ) )
elif arg[0] == "-":
a__ : Union[str, Any] = erase(lowerCamelCase , int(arg[1:] ) )
else:
print("Unknown command" )
return root
def _A ( ):
    """Interactive REPL: read command lines, mutate the treap, print it; 'q' quits.

    Fix: the transformed original passed the undefined name ``lowerCamelCase``
    to the interpreter and to ``print``; the local names are now used.

    NOTE(review): ``interact_treap`` is not defined under that name in this
    module — confirm the intended binding against the untransformed source.
    """
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. " )
    args = input()
    while args != "q":
        root = interact_treap(root , args )
        print(root )
        args = input()
    print("good by!" )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): ``main`` is not defined under that name in this module
    # (the driver above is ``_A``) — confirm against the untransformed source.
    main()
| 629 | 1 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
    format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.getLogger(__name__)
def _A ( ):
    """Tokenize a raw text dump line-by-line and pickle the id sequences.

    NOTE(review): machine-transformed — intermediate results are bound to the
    single name ``a__`` while later lines read the intended names (``parser``,
    ``tokenizer``, ``data``, ``rslt``, ``bos``/``sep``, ``interval``,
    ``start``/``end``, ``dp_file``, ``vocab_size``, ``rslt_``), which are
    undefined here; ``type=lowerCamelCase`` is also unbound and ``iter``
    shadows the builtin.  Verify against the untransformed source.
    """
    a__ : Union[str, Any] = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
    parser.add_argument("--file_path" , type=lowerCamelCase , default="data/dump.txt" , help="The path to the data." )
    parser.add_argument("--tokenizer_type" , type=lowerCamelCase , default="bert" , choices=["bert", "roberta", "gpt2"] )
    parser.add_argument("--tokenizer_name" , type=lowerCamelCase , default="bert-base-uncased" , help="The tokenizer to use." )
    parser.add_argument("--dump_file" , type=lowerCamelCase , default="data/dump" , help="The dump file prefix." )
    a__ : Any = parser.parse_args()
    logger.info(F"""Loading Tokenizer ({args.tokenizer_name})""" )
    if args.tokenizer_type == "bert":
        a__ : List[str] = BertTokenizer.from_pretrained(args.tokenizer_name )
        a__ : Union[str, Any] = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        a__ : Optional[int] = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        a__ : Dict = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        a__ : Optional[Any] = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        a__ : Optional[int] = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        a__ : Union[str, Any] = GPTaTokenizer.from_pretrained(args.tokenizer_name )
        a__ : int = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        a__ : Optional[int] = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`
    logger.info(F"""Loading text from {args.file_path}""" )
    with open(args.file_path , "r" , encoding="utf8" ) as fp:
        a__ : Tuple = fp.readlines()
    logger.info("Start encoding" )
    logger.info(F"""{len(lowerCamelCase )} examples to process.""" )
    a__ : List[str] = []
    a__ : Any = 0
    a__ : Optional[Any] = 1_0000
    a__ : int = time.time()
    for text in data:
        # Wrap each line with the tokenizer's BOS/CLS and EOS/SEP markers.
        a__ : List[str] = F"""{bos} {text.strip()} {sep}"""
        a__ : Optional[int] = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
        rslt.append(lowerCamelCase )
        iter += 1
        if iter % interval == 0:
            a__ : Optional[int] = time.time()
            logger.info(F"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""" )
            a__ : Dict = time.time()
    logger.info("Finished binarization" )
    logger.info(F"""{len(lowerCamelCase )} examples processed.""" )
    a__ : int = F"""{args.dump_file}.{args.tokenizer_name}.pickle"""
    a__ : str = tokenizer.vocab_size
    # Use 16-bit ids when the vocabulary fits, to halve the dump size.
    if vocab_size < (1 << 16):
        a__ : Optional[int] = [np.uintaa(lowerCamelCase ) for d in rslt]
    else:
        a__ : Tuple = [np.intaa(lowerCamelCase ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(F"""Dump to {dp_file}""" )
    with open(lowerCamelCase , "wb" ) as handle:
        pickle.dump(rslt_ , lowerCamelCase , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
    # NOTE(review): ``main`` is not defined under that name in this module
    # (the entry point above is ``_A``) — confirm against the source.
    main()
| 629 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __lowerCAmelCase ( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
    """Fast pipeline tests for ``StableUnCLIPPipeline`` built from tiny components.

    NOTE(review): machine-transformed — results are bound to the throwaway
    name ``a__`` while later lines read the intended names
    (``embedder_hidden_size``, ``embedder_projection_dim``, ``prior_tokenizer``
    …), which are undefined here, and the helper methods share the name
    ``_snake_case``.  Verify against the untransformed source.
    """
    _UpperCamelCase : Optional[int] = StableUnCLIPPipeline
    _UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_PARAMS
    _UpperCamelCase : str = TEXT_TO_IMAGE_BATCH_PARAMS
    _UpperCamelCase : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
    _UpperCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    _UpperCamelCase : Any = False
    def _snake_case ( self ) -> List[str]:
        """Build the dict of tiny prior + denoiser components, seeded for determinism."""
        a__ : Any = 32
        a__ : int = embedder_hidden_size
        # prior components
        torch.manual_seed(0 )
        a__ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        torch.manual_seed(0 )
        a__ : Optional[Any] = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case , projection_dim=snake_case , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
        torch.manual_seed(0 )
        a__ : int = PriorTransformer(
            num_attention_heads=2 , attention_head_dim=12 , embedding_dim=snake_case , num_layers=1 , )
        torch.manual_seed(0 )
        a__ : str = DDPMScheduler(
            variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_000 , clip_sample=snake_case , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
        # regular denoising components
        torch.manual_seed(0 )
        a__ : Any = StableUnCLIPImageNormalizer(embedding_dim=snake_case )
        a__ : int = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
        torch.manual_seed(0 )
        a__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        torch.manual_seed(0 )
        a__ : Union[str, Any] = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
        torch.manual_seed(0 )
        a__ : Any = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=snake_case , layers_per_block=1 , upcast_attention=snake_case , use_linear_projection=snake_case , )
        torch.manual_seed(0 )
        a__ : Tuple = DDIMScheduler(
            beta_schedule="scaled_linear" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=snake_case , steps_offset=1 , )
        torch.manual_seed(0 )
        a__ : Optional[int] = AutoencoderKL()
        a__ : Any = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
        return components
    def _snake_case ( self , snake_case , snake_case=0 ) -> Dict:
        """Build deterministic pipeline-call kwargs for the given device/seed."""
        if str(snake_case ).startswith("mps" ):
            # MPS generators must be created via the global seed.
            a__ : Union[str, Any] = torch.manual_seed(snake_case )
        else:
            a__ : List[str] = torch.Generator(device=snake_case ).manual_seed(snake_case )
        a__ : Any = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def _snake_case ( self ) -> List[str]:
        """Attention-slicing forward pass (exact comparison on CPU only)."""
        a__ : Dict = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=snake_case )
    def _snake_case ( self ) -> int:
        """Single-sample batch equivalence (exact comparison on CPU/MPS only)."""
        a__ : int = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=snake_case )
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
    """Slow GPU integration tests for ``StableUnCLIPPipeline``.

    NOTE(review): machine-transformed — all methods share the name
    ``_snake_case`` and several lines read names (``pipe``, ``output``,
    ``image``, ``mem_bytes``, ``expected_image``) whose assignments were
    rebound to ``a__``; verify against the untransformed source.
    """
    def _snake_case ( self ) -> List[str]:
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def _snake_case ( self ) -> Tuple:
        """Full text-to-image run compared against a reference image."""
        a__ : int = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
        a__ : int = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
        pipe.to(snake_case )
        pipe.set_progress_bar_config(disable=snake_case )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        a__ : Dict = torch.Generator(device="cpu" ).manual_seed(0 )
        a__ : Dict = pipe("anime turle" , generator=snake_case , output_type="np" )
        a__ : List[str] = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(snake_case , snake_case )
    def _snake_case ( self ) -> Tuple:
        """Peak-memory check with offloading + attention slicing enabled."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        a__ : str = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
        a__ : Union[str, Any] = pipe.to(snake_case )
        pipe.set_progress_bar_config(disable=snake_case )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        a__ : Union[str, Any] = pipe(
            "anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
        a__ : List[Any] = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 629 | 1 |
import logging
from transformers import PretrainedConfig
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.getLogger(__name__)
# Map of known pretrained configs (immediately rebinds the same name; kept as-is).
SCREAMING_SNAKE_CASE__ : Tuple = {
    """bertabs-finetuned-cnndm""": """https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json""",
}
class __lowerCAmelCase ( _UpperCamelCase ):
    """Configuration for the BertAbs abstractive-summarization model.

    Stores the encoder/decoder layer counts, hidden sizes, head counts,
    feed-forward sizes and dropouts; all extra kwargs are forwarded to the
    base config class.

    Fix: the transformed original declared every parameter with one repeated
    name (a SyntaxError) and bound each value to a throwaway local, so nothing
    was stored on ``self``; names are reconstructed from the body's reads.
    """

    _UpperCamelCase : List[Any] = """bertabs"""

    def __init__(
        self ,
        vocab_size=30_522 ,      # token vocabulary size
        max_pos=512 ,            # maximum number of positions
        enc_layers=6 ,
        enc_hidden_size=512 ,
        enc_heads=8 ,
        enc_ff_size=512 ,
        enc_dropout=0.2 ,
        dec_layers=6 ,
        dec_hidden_size=768 ,
        dec_heads=8 ,
        dec_ff_size=2_048 ,
        dec_dropout=0.2 ,
        **snake_case ,
    ) -> None:
        super().__init__(**snake_case )
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 629 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the DistilBERT subpackage: framework-specific
# modules are only imported on attribute access via ``_LazyModule``; each
# optional backend is probed with a try/except around its availability check.
SCREAMING_SNAKE_CASE__ : str = {
    """configuration_distilbert""": [
        """DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """DistilBertConfig""",
        """DistilBertOnnxConfig""",
    ],
    """tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): these else-branches rebind ``SCREAMING_SNAKE_CASE__``
    # rather than extending the structure above — in the untransformed source
    # they presumably assign new keys into ``_import_structure``; confirm.
    SCREAMING_SNAKE_CASE__ : Optional[int] = ["""DistilBertTokenizerFast"""]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ : Tuple = [
        """DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """DistilBertForMaskedLM""",
        """DistilBertForMultipleChoice""",
        """DistilBertForQuestionAnswering""",
        """DistilBertForSequenceClassification""",
        """DistilBertForTokenClassification""",
        """DistilBertModel""",
        """DistilBertPreTrainedModel""",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ : List[Any] = [
        """TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFDistilBertForMaskedLM""",
        """TFDistilBertForMultipleChoice""",
        """TFDistilBertForQuestionAnswering""",
        """TFDistilBertForSequenceClassification""",
        """TFDistilBertForTokenClassification""",
        """TFDistilBertMainLayer""",
        """TFDistilBertModel""",
        """TFDistilBertPreTrainedModel""",
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ : Any = [
        """FlaxDistilBertForMaskedLM""",
        """FlaxDistilBertForMultipleChoice""",
        """FlaxDistilBertForQuestionAnswering""",
        """FlaxDistilBertForSequenceClassification""",
        """FlaxDistilBertForTokenClassification""",
        """FlaxDistilBertModel""",
        """FlaxDistilBertPreTrainedModel""",
    ]
# Under static type checking, perform the real imports so analyzers see them.
if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )
else:
    # At runtime, replace this module with a lazy proxy.
    import sys
    SCREAMING_SNAKE_CASE__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 629 | 1 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE__ : Tuple = TypeVar("""T""")
class __lowerCAmelCase ( Generic[T] ):
def __init__( self , snake_case ) -> Tuple:
"""simple docstring"""
a__ : Tuple = data
a__ : Node[T] | None = None
def __str__( self ) -> str:
"""simple docstring"""
return F"""{self.data}"""
class __lowerCAmelCase ( Generic[T] ):
    """Singly linked-list stack (LIFO).

    NOTE(review): machine-transformed — ``T``/``Node`` are not defined under
    those names in this module, the mutators all share the name ``_snake_case``
    (later defs shadow earlier ones), and pushes/pops bind to the throwaway
    name ``a__`` instead of updating ``self.top``.  Verify against the
    untransformed source.
    """
    def __init__( self ) -> None:
        """Start with an empty stack."""
        a__ : Node[T] | None = None
    def __iter__( self ) -> Iterator[T]:
        """Yield values from top to bottom."""
        a__ : Tuple = self.top
        while node:
            yield node.data
            a__ : Optional[int] = node.next
    def __str__( self ) -> str:
        """Render the stack as 'top->...->bottom'."""
        return "->".join([str(snake_case ) for item in self] )
    def __len__( self ) -> int:
        """Number of items currently on the stack."""
        return len(tuple(iter(self ) ) )
    def _snake_case ( self ) -> bool:
        """True when the stack has no items."""
        return self.top is None
    def _snake_case ( self , snake_case ) -> None:
        """Push a value onto the top of the stack."""
        a__ : Any = Node(snake_case )
        if not self.is_empty():
            a__ : Optional[Any] = self.top
        a__ : List[str] = node
    def _snake_case ( self ) -> T:
        """Pop and return the top value; raise IndexError when empty."""
        if self.is_empty():
            raise IndexError("pop from empty stack" )
        assert isinstance(self.top , snake_case )
        a__ : Optional[Any] = self.top
        a__ : Optional[Any] = self.top.next
        return pop_node.data
    def _snake_case ( self ) -> T:
        """Return the top value without removing it; raise IndexError when empty."""
        if self.is_empty():
            raise IndexError("peek from empty stack" )
        assert self.top is not None
        return self.top.data
    def _snake_case ( self ) -> None:
        """Drop every item from the stack."""
        a__ : Optional[int] = None
if __name__ == "__main__":
    from doctest import testmod
    testmod()
| 629 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def _A ( lowerCamelCase ):
    """Estimate pi with a Monte Carlo dart-throwing simulation.

    Fix: the original stored every intermediate result in a throwaway local
    ``a__`` and then read undefined names (``distance_from_centre``,
    ``proportion``, ``pi_estimate``); the proper locals are restored.

    Args:
        lowerCamelCase: number of random points to sample (>= 1).

    Returns:
        The pi estimate (also printed, as before).  Returning the value is
        backward compatible for callers that ignored the previous ``None``.
    """
    iterations = lowerCamelCase

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(F"""The estimated value of pi is {pi_estimate}""")
    print(F"""The numpy value of pi is {pi}""")
    print(F"""The total error is {abs(pi - pi_estimate)}""")
    return pi_estimate
def _A (
    iterations,
    function_to_integrate,
    min_value = 0.0,
    max_value = 1.0,
):
    """Monte Carlo estimate of the integral of *function_to_integrate* over
    ``[min_value, max_value]``.

    Fix: the original repeated the parameter name ``lowerCamelCase`` four
    times, which is a SyntaxError; distinct names are restored from the
    body's own references.

    Args:
        iterations: number of random samples to average.
        function_to_integrate: callable mapping a float to a float.
        min_value: lower integration bound.
        max_value: upper integration bound.

    Returns:
        Average of the function at uniformly drawn points, scaled by the
        interval length.
    """
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


# The check functions below in this file call this helper by its descriptive
# name; expose it so those call sites resolve.
area_under_curve_estimator = _A
def _A (iterations, min_value = 0.0, max_value = 1.0):
    """Sanity-check ``area_under_curve_estimator`` on ``y = x`` by comparing
    the Monte Carlo estimate with the closed-form area ``(b^2 - a^2) / 2``.

    Fix: the original repeated the parameter name ``lowerCamelCase`` (a
    SyntaxError) and passed the same undefined name for every argument of
    the estimator; names are restored from the f-strings in the body.

    Args:
        iterations: number of Monte Carlo samples.
        min_value: lower bound of x.
        max_value: upper bound of x.
    """

    def identity_function(x: float) -> float:
        # y = x
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(F"""Estimating area under y=x where x varies from {min_value} to {max_value}""")
    print(F"""Estimated value is {estimated_value}""")
    print(F"""Expected value is {expected_value}""")
    print(F"""Total error is {abs(estimated_value - expected_value)}""")
    print("******************")
def _A (iterations):
    """Estimate pi as the area under ``y = sqrt(4 - x^2)`` on ``[0, 2]``
    (a quarter circle of radius 2, whose area is pi).

    Fix: the original passed an undefined name for every argument of the
    estimator; the intended call is restored.

    Args:
        iterations: number of Monte Carlo samples.
    """

    def function_to_integrate(x: float) -> float:
        # Upper half of the circle x^2 + y^2 = 4.
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(F"""Estimated value is {estimated_value}""")
    print(F"""Expected value is {pi}""")
    print(F"""Total error is {abs(estimated_value - pi)}""")
    print("******************")
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    from doctest import testmod

    testmod()
| 629 | 1 |
# Imports
import numpy as np
class __lowerCAmelCase :
    """Compute vegetation indices (NDVI, EVI, hue, ...) from per-band
    reflectance values or numpy arrays.

    Fixes vs. the original: ``set_matricies`` stored each band in a
    throwaway local ``a__`` so ``self.red``/``self.green``/... never
    existed, and every index method was named ``_snake_case`` so later
    definitions clobbered earlier ones.  The method names restored below
    are the ones the original dispatch dictionary itself referenced
    (``self.arvaa``, ``self.ccci``, ..., ``self.ndre``), and
    ``set_matricies`` keeps its original (misspelled) public name because
    the original ``__init__`` called it by that name.
    """

    def __init__( self , red=None , green=None , blue=None , red_edge=None , nir=None ) -> None:
        """Optionally store the initial band matrices."""
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies( self , red=None , green=None , blue=None , red_edge=None , nir=None ):
        """Update any subset of the band matrices; ``None`` leaves a band unchanged."""
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation( self , index="" , red=None , green=None , blue=None , red_edge=None , nir=None ):
        """Update bands, then dispatch to the index named *index*.

        Returns the index value, or ``False`` (after printing a message)
        when *index* is unknown.
        """
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arvaa,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False

    def arvaa( self ):
        """Atmospherically Resistant Vegetation Index 2."""
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci( self ):
        """Canopy Chlorophyll Content Index."""
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi( self ):
        """Chlorophyll Vegetation Index."""
        return self.nir * (self.red / (self.green**2))

    def gli( self ):
        """Green Leaf Index."""
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi( self ):
        """Normalized Difference Vegetation Index."""
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi( self ):
        """Blue-band NDVI."""
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi( self ):
        """Red-edge NDVI."""
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi( self ):
        """Green-band NDVI."""
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi( self ):
        """Green-Blue NDVI."""
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi( self ):
        """Green-Red NDVI."""
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi( self ):
        """Red-Blue NDVI."""
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi( self ):
        """Pan NDVI."""
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi( self , x=0.08 , a=1.22 , b=0.03 ):
        """Adjusted Transformed Soil-Adjusted VI (a/b are soil-line slope/intercept)."""
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi( self ):
        """Blue-Wide Dynamic Range Vegetation Index."""
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green( self ):
        """Chlorophyll Index using the green band."""
        return (self.nir / self.green) - 1

    def ci_rededge( self ):
        """Chlorophyll Index using the red-edge band."""
        return (self.nir / self.redEdge) - 1

    def ci( self ):
        """Coloration Index."""
        return (self.red - self.blue) / self.red

    def ctvi( self ):
        """Corrected Transformed Vegetation Index (built on NDVI)."""
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi( self ):
        """Green Difference Vegetation Index."""
        return self.nir - self.green

    def evi( self ):
        """Enhanced Vegetation Index."""
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi( self ):
        """Global Environment Monitoring Index."""
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi( self , y=0.16 ):
        """Green Optimized Soil-Adjusted VI (y is the soil adjustment factor)."""
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi( self , n=0.5 ):
        """Green Soil-Adjusted VI (n is the soil adjustment factor)."""
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue( self ):
        """Hue angle from the visible bands."""
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi( self , a=None , b=None ):
        """Ideal Vegetation Index (a/b: intercept/slope of the soil line)."""
        return (self.nir - b) / (a * self.red)

    def ipvi( self ):
        """Infrared Percentage Vegetation Index."""
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i( self ):
        """Intensity of the visible bands."""
        return (self.red + self.green + self.blue) / 30.5

    def rvi( self ):
        """Ratio Vegetation Index."""
        return self.nir / self.red

    def mrvi( self ):
        """Modified (normalized) RVI."""
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi( self ):
        """Modified Soil-Adjusted Vegetation Index."""
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g( self ):
        """Normalized green band."""
        return self.green / (self.nir + self.red + self.green)

    def norm_nir( self ):
        """Normalized NIR band."""
        return self.nir / (self.nir + self.red + self.green)

    def norm_r( self ):
        """Normalized red band."""
        return self.red / (self.nir + self.red + self.green)

    def ngrdi( self ):
        """Normalized Green-Red Difference Index."""
        return (self.green - self.red) / (self.green + self.red)

    def ri( self ):
        """Redness Index."""
        return (self.red - self.green) / (self.red + self.green)

    def s( self ):
        """Saturation of the visible bands."""
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if( self ):
        """Shape (IF) index."""
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi( self ):
        """Difference Vegetation Index (ratio form, as in the original)."""
        return self.nir / self.red

    def tvi( self ):
        """Transformed Vegetation Index."""
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre( self ):
        """Normalized Difference Red-Edge index."""
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 629 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _A (old_name, num_meta4D_last_stage):
    """Map one original EfficientFormer state-dict key to its HF name.

    Fix: the original repeated the parameter name ``lowerCamelCase`` (a
    SyntaxError) and stored every intermediate in a throwaway local
    ``a__`` while reading undefined names (``layer``, ``match``,
    ``trimmed_name``, ``new_name``); the proper locals are restored.  The
    second parameter name is taken from the body's own reference.

    Args:
        old_name: key from the original checkpoint.
        num_meta4D_last_stage: number of meta4D blocks in the last stage;
            used to split the last stage into meta4D and meta3D layers.

    Returns:
        The renamed key.
    """
    new_name = old_name

    if "patch_embed" in old_name:
        _prefix, layer, _param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        # Keys look like "network.<stage>.<block>...."; two-digit block
        # indices need the longer match below.
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                # Blocks past the meta4D count belong to the meta3D layers,
                # re-indexed from zero.
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")
            new_name = "last_stage." + trimmed_name
    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name
    return new_name
# NOTE(review): this block is broken as written and needs a real fix, not just
# docs: the signature repeats `lowerCamelCase` (a SyntaxError in Python), the
# loop body reads undefined names (`checkpoint`, `val`), and both assignments
# go to a throwaway local `a__` instead of writing the renamed key back into
# the state dict.  Intended behavior (per the converter below): for each key,
# pop its value and re-insert it under the name produced by the key-renaming
# helper, then return the mutated dict.  It cannot be repaired in isolation
# because the renaming helper is not reachable under a stable name here.
def _A ( lowerCamelCase , lowerCamelCase ):
    for key in checkpoint.copy().keys():
        a__ : Optional[Any] = checkpoint.pop(lowerCamelCase )  # should be: val = checkpoint.pop(key)
        a__ : Dict = val  # should be: checkpoint[<renamed key>] = val
    return checkpoint
def _A ( ):
    """Download the standard COCO verification image (two cats on a couch)
    used to sanity-check the converted model.

    Fix: the original passed the undefined name ``lowerCamelCase`` as both
    the URL and the ``stream`` flag; the function takes no parameters.

    Returns:
        A ``PIL.Image`` loaded from the fetched bytes.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def _A (
    checkpoint_path,
    efficientformer_config_file,
    pytorch_dump_path,
    push_to_hub,
):
    """Convert an original EfficientFormer checkpoint to the HF format,
    verify preprocessing and logits, save the result, and optionally push
    it to the hub.

    Fix: the original repeated the parameter name ``lowerCamelCase`` four
    times (a SyntaxError) and stored every result in a throwaway local
    ``a__`` while reading undefined names (``config``, ``model``,
    ``processor``, ``logits``, ...).  Parameter names are restored from the
    CLI call site below; ``config.num_metaad_blocks`` is corrected to the
    EfficientFormerConfig attribute ``num_meta3d_blocks``.

    Args:
        checkpoint_path: path to the original PyTorch checkpoint.
        efficientformer_config_file: JSON config for the model.
        pytorch_dump_path: output directory for model and processor.
        push_to_hub: whether to push both artifacts to the hub.
    """
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    # e.g. ".../efficientformer_l1_300d.pth" -> "efficientformer_l1"
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    # The HF image processor must reproduce the original torchvision pipeline.
    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1E-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1E-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"""
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""")
    processor.save_pretrained(pytorch_dump_path)
    print(F"""Processor successfuly saved at {pytorch_dump_path}""")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


# The CLI entry point below calls the converter by this descriptive name.
convert_efficientformer_checkpoint = _A
# NOTE(review): this entry point is broken as written: the ArgumentParser and
# the parse_args() result are assigned to `SCREAMING_SNAKE_CASE__`, yet the
# code below reads `parser` and `args`, and `convert_efficientformer_checkpoint`
# is not defined under that name in this file — all three need fixing.
if __name__ == "__main__":
    SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--pytorch_model_path""",
        default=None,
        type=str,
        required=True,
        help="""Path to EfficientFormer pytorch checkpoint.""",
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The json file for EfficientFormer model config.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    # --no-push_to_hub writes False into the same `push_to_hub` destination.
    parser.add_argument(
        """--no-push_to_hub""",
        dest="""push_to_hub""",
        action="""store_false""",
        help="""Do not push model and image processor to the hub""",
    )
    # Pushing is on by default unless --no-push_to_hub is given.
    parser.set_defaults(push_to_hub=True)
    SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
| 629 | 1 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
# Module-level logger for the dataset-combining helpers.
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
# Type variable constraining the combine helpers to map-style or iterable datasets.
SCREAMING_SNAKE_CASE__ : List[str] = TypeVar("""DatasetType""", Dataset, IterableDataset)
def _A (
    datasets,
    probabilities = None,
    seed = None,
    info = None,
    split = None,
    stopping_strategy = "first_exhausted",
):
    """Interleave several datasets of the same kind (all map-style or all
    iterable) into a single dataset, alternating between the sources.

    Fix: the original repeated the parameter name ``lowerCamelCase`` six
    times (a SyntaxError) and unpacked the type pair into the same local
    twice; distinct names are restored.

    Args:
        datasets: non-empty list of ``Dataset`` or ``IterableDataset``.
        probabilities: optional sampling probabilities per source.
        seed: optional RNG seed for probabilistic sampling.
        info: optional ``DatasetInfo`` for the result.
        split: optional ``NamedSplit`` for the result.
        stopping_strategy: "first_exhausted" or "all_exhausted".

    Raises:
        ValueError: on empty input, mixed/invalid element types, or an
            unknown stopping strategy.
    """
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    F"""Dataset at position {i} has at least one split: {list(dataset)}\n"""
                    F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"""
                )
            raise ValueError(
                F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."""
            )
        if i == 0:
            # The first element decides whether we are in map-style or iterable mode.
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."""
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
def _A (
    dsets,
    info = None,
    split = None,
    axis = 0,
):
    """Concatenate several datasets of the same kind (all map-style or all
    iterable) along *axis*.

    Fix: the original repeated the parameter name ``lowerCamelCase`` (a
    SyntaxError) and unpacked the type pair into the same local twice;
    distinct names are restored.  Error-message text is kept verbatim.

    Args:
        dsets: non-empty list of ``Dataset`` or ``IterableDataset``.
        info: optional ``DatasetInfo`` for the result.
        split: optional ``NamedSplit`` for the result.
        axis: 0 to stack rows, 1 to join columns.

    Raises:
        ValueError: on empty input or mixed/invalid element types.
    """
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    F"""Dataset at position {i} has at least one split: {list(dataset)}\n"""
                    F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"""
                )
            raise ValueError(
                F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."""
            )
        if i == 0:
            # The first element decides whether we are in map-style or iterable mode.
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."""
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
| 629 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
# File names of the slow- and fast-tokenizer vocabulary artifacts.
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
# Download URLs of those artifacts per pretrained checkpoint.
SCREAMING_SNAKE_CASE__ : List[Any] = {
    """vocab_file""": {
        """unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
    },
    """tokenizer_file""": {
        """unc-nlp/lxmert-base-uncased""": (
            """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
        ),
    },
}
# Maximum input length (in tokens) per pretrained checkpoint.
SCREAMING_SNAKE_CASE__ : Any = {
    """unc-nlp/lxmert-base-uncased""": 5_1_2,
}
# Per-checkpoint tokenizer construction defaults.
SCREAMING_SNAKE_CASE__ : Optional[int] = {
    """unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class __lowerCAmelCase ( _UpperCamelCase ):
    r"""Fast (tokenizers-backed) LXMERT tokenizer, based on WordPiece.

    Fixes vs. the original: all five class attributes reused the name
    ``_UpperCamelCase`` (so only the last survived), ``__init__`` repeated
    the parameter name ``snake_case`` (a SyntaxError) and stored results in
    throwaway locals, and every method shared the name ``_snake_case``,
    clobbering the overrides the base tokenizer relies on.  The
    conventional attribute and method names expected by the base class are
    restored below.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        """Build the fast tokenizer and sync the backend normalizer options."""
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        # Re-create the backend normalizer only when any relevant option
        # differs from the serialized state.
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """``[CLS] A [SEP]`` (plus ``B [SEP]`` when a second sequence is given)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1 = None) -> List[int]:
        """0s for the first sequence (with CLS/SEP), 1s for the second (with SEP)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix = None) -> Tuple[str]:
        """Write the backend vocabulary files to *save_directory*."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 629 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
# File names of the slow- and fast-tokenizer vocabulary artifacts.
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
# Download URLs of those artifacts per pretrained checkpoint.
SCREAMING_SNAKE_CASE__ : List[Any] = {
    """vocab_file""": {
        """unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
    },
    """tokenizer_file""": {
        """unc-nlp/lxmert-base-uncased""": (
            """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
        ),
    },
}
# Maximum input length (in tokens) per pretrained checkpoint.
SCREAMING_SNAKE_CASE__ : Any = {
    """unc-nlp/lxmert-base-uncased""": 5_1_2,
}
# Per-checkpoint tokenizer construction defaults.
SCREAMING_SNAKE_CASE__ : Optional[int] = {
    """unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class __lowerCAmelCase ( _UpperCamelCase ):
    r"""Fast (tokenizers-backed) LXMERT tokenizer, based on WordPiece.

    Fixes vs. the original: all five class attributes reused the name
    ``_UpperCamelCase`` (so only the last survived), ``__init__`` repeated
    the parameter name ``snake_case`` (a SyntaxError) and stored results in
    throwaway locals, and every method shared the name ``_snake_case``,
    clobbering the overrides the base tokenizer relies on.  The
    conventional attribute and method names expected by the base class are
    restored below.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        """Build the fast tokenizer and sync the backend normalizer options."""
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        # Re-create the backend normalizer only when any relevant option
        # differs from the serialized state.
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """``[CLS] A [SEP]`` (plus ``B [SEP]`` when a second sequence is given)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1 = None) -> List[int]:
        """0s for the first sequence (with CLS/SEP), 1s for the second (with SEP)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix = None) -> Tuple[str]:
        """Write the backend vocabulary files to *save_directory*."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 629 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for the MobileNetV2 configuration.
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__)
# Config-file URLs per pretrained MobileNetV2 checkpoint.
SCREAMING_SNAKE_CASE__ : Any = {
    """google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
    """google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
    """google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
    """google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class __lowerCAmelCase ( _UpperCamelCase ):
    r"""Configuration for MobileNetV2 models.

    Fixes vs. the original: the ``model_type`` class attribute was renamed
    to ``_UpperCamelCase`` (the base config machinery reads ``model_type``),
    ``__init__`` repeated the parameter name ``snake_case`` for every
    argument (a SyntaxError), and each value was stored in a throwaway
    local ``a__`` instead of on ``self``.  Parameter names are restored to
    match the attributes the original body intended to set.
    """

    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ) -> None:
        """Store the architecture hyper-parameters; ``kwargs`` go to the base config."""
        super().__init__(**kwargs)

        # A non-positive width multiplier would produce an empty network.
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class __lowerCAmelCase ( _UpperCamelCase ):
    r"""ONNX export configuration for MobileNetV2.

    Fixes vs. the original: all three properties shared the name
    ``_snake_case`` (so only the last survived) and the version attribute
    was renamed to ``_UpperCamelCase``; the property names the ONNX export
    machinery reads (``inputs``, ``outputs``, ``atol_for_validation``) and
    the ``torch_onnx_minimum_version`` attribute are restored.
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec of the model inputs (batch dimension only)."""
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec of the outputs, depending on the export task."""
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance when validating the exported model."""
        return 1E-4
| 629 | 1 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
# Download URLs of the original OpenAI Whisper checkpoints, keyed by size name.
SCREAMING_SNAKE_CASE__ : List[str] = {
    """tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
    """tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
    """base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
    """base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
    """small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
    """small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
    """medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
    """medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
    """large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
    """large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def _A(state_dict):
    """
    Drop bookkeeping entries from an OpenAI Whisper state dict, in place.

    The original checkpoint stores "layers"/"blocks" counters that have no
    HuggingFace equivalent. Missing keys are ignored (`pop` default of None).
    """
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        # BUG FIX: the degraded version popped the dict with itself as the key
        # (TypeError: unhashable). Pop each ignore key with a None default.
        state_dict.pop(k, None)
# Substring-level renames applied to OpenAI Whisper state-dict keys to turn
# them into HuggingFace WhisperForConditionalGeneration parameter names.
# Matching is by substring, so the ".attn.*" and ".cross_attn.*" entries are
# kept disjoint on purpose.
SCREAMING_SNAKE_CASE__ : List[str] = {
    """blocks""": """layers""",
    """mlp.0""": """fc1""",
    """mlp.2""": """fc2""",
    """mlp_ln""": """final_layer_norm""",
    """.attn.query""": """.self_attn.q_proj""",
    """.attn.key""": """.self_attn.k_proj""",
    """.attn.value""": """.self_attn.v_proj""",
    """.attn_ln""": """.self_attn_layer_norm""",
    """.attn.out""": """.self_attn.out_proj""",
    """.cross_attn.query""": """.encoder_attn.q_proj""",
    """.cross_attn.key""": """.encoder_attn.k_proj""",
    """.cross_attn.value""": """.encoder_attn.v_proj""",
    """.cross_attn_ln""": """.encoder_attn_layer_norm""",
    """.cross_attn.out""": """.encoder_attn.out_proj""",
    """decoder.ln.""": """decoder.layer_norm.""",
    """encoder.ln.""": """encoder.layer_norm.""",
    """token_embedding""": """embed_tokens""",
    """encoder.positional_embedding""": """encoder.embed_positions.weight""",
    """decoder.positional_embedding""": """decoder.embed_positions.weight""",
    """ln_post""": """layer_norm""",
}
def _A(s_dict):
    """
    Rename Whisper state-dict keys in place using WHISPER_MAPPING.

    Every mapping entry whose key occurs as a substring of a parameter name is
    replaced (several rules may fire on one key), and the entry is re-inserted
    under the renamed key. Returns the same, mutated dict.
    """
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                # BUG FIX: the degraded version replaced with the wrong
                # arguments; substitute mapping key k with its target v.
                new_key = new_key.replace(k, v)
        print(F"""{key} -> {new_key}""")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def _A(emb):
    """
    Build a bias-free Linear layer that shares its weight with `emb`.

    Used to tie the LM head (proj_out) to the decoder token embedding: the
    returned layer's weight tensor is `emb.weight.data` itself, not a copy.
    """
    vocab_size, emb_size = emb.weight.shape
    # BUG FIX: the degraded version passed the same placeholder for both
    # dimensions and for `bias`; the head has no bias.
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _A(url, root):
    """
    Download `url` into directory `root`, verifying its SHA256 checksum.

    The expected checksum is the second-to-last path segment of the URL
    (OpenAI's layout, see _MODELS). An existing file with a matching checksum
    is reused; a mismatching one is re-downloaded with a warning.

    Returns the raw model bytes. Raises RuntimeError if the target path is not
    a regular file or the downloaded bytes fail the checksum.
    """
    import urllib.request  # local import: `import urllib` alone does not guarantee the submodule

    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(F"""{download_target} exists and is not a regular file""")
    if os.path.isfile(download_target):
        # BUG FIX: use a context manager (the original leaked the handle) and
        # hashlib.sha256 (hashlib.shaaaa does not exist).
        with open(download_target, "rb") as f:
            model_bytes = f.read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(F"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""")
    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    with open(download_target, "rb") as f:
        model_bytes = f.read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        # BUG FIX: "does not not match" -> "does not match".
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.")
    return model_bytes
# Convert an OpenAI Whisper checkpoint into a HuggingFace
# WhisperForConditionalGeneration and save it with save_pretrained.
# NOTE(review): this block is machine-degraded — the signature reuses a single
# placeholder for both parameters (originally checkpoint_path and
# pytorch_dump_folder_path), and intermediate results are bound to `a__` but
# read back under their original names (original_checkpoint, dimensions,
# state_dict, proj_out_weights, tie_embeds, model, missing, ...). Restore the
# original identifiers before running; annotated as-is below.
def _A ( lowerCamelCase , lowerCamelCase ):
    # Aliases (e.g. "tiny") are resolved via _MODELS and downloaded; a local
    # path ending in .pt is loaded directly onto CPU.
    if ".pt" not in checkpoint_path:
        a__ : str = _download(_MODELS[checkpoint_path] )
    else:
        a__ : str = torch.load(lowerCamelCase , map_location="cpu" )
    a__ : Dict = original_checkpoint["dims"]
    a__ : Optional[int] = original_checkpoint["model_state_dict"]
    # Keep the decoder embedding weights to (optionally) tie the LM head.
    a__ : Any = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(lowerCamelCase )
    rename_keys(lowerCamelCase )
    a__ : Optional[Any] = True
    # FFN width is read off the first decoder layer's fc1.
    a__ : Optional[Any] = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    a__ : Tuple = WhisperConfig(
        vocab_size=dimensions["n_vocab"] , encoder_ffn_dim=lowerCamelCase , decoder_ffn_dim=lowerCamelCase , num_mel_bins=dimensions["n_mels"] , d_model=dimensions["n_audio_state"] , max_target_positions=dimensions["n_text_ctx"] , encoder_layers=dimensions["n_audio_layer"] , encoder_attention_heads=dimensions["n_audio_head"] , decoder_layers=dimensions["n_text_layer"] , decoder_attention_heads=dimensions["n_text_state"] , max_source_positions=dimensions["n_audio_ctx"] , )
    a__ : Optional[Any] = WhisperForConditionalGeneration(lowerCamelCase )
    # Non-strict load: only the sinusoidal position tables may be missing.
    a__ , a__ : Tuple = model.model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
    if len(lowerCamelCase ) > 0 and not set(lowerCamelCase ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            F""" but all the following weights are missing {missing}""" )
    if tie_embeds:
        a__ : Any = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        a__ : str = proj_out_weights
    model.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
    # CLI entry point: convert a (local or downloadable) Whisper checkpoint.
    parser = argparse.ArgumentParser()
    # Required parameters
    # BUG FIX: the parser/args results were bound to placeholder names while
    # being used as `parser`/`args`; also fixed the "Patht" typo in the help.
    parser.add_argument("""--checkpoint_path""", type=str, help="""Path to the downloaded checkpoints""")
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    # NOTE(review): the conversion function above was degraded to `_A`; this
    # call assumes its original name — confirm before running.
    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 629 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _A(tree):
    """
    Recursively collect the `.shape` of every tensor in a nested structure.

    `tree` may be a dict, list/tuple (nested arbitrarily) or a torch.Tensor;
    shapes are returned in traversal order. Raises ValueError for any other
    leaf type.
    """
    shapes = []
    # BUG FIX: the degraded version tested isinstance(tree, tree) and recursed
    # through an undefined name; recurse via this function itself.
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_A(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_A(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")
    return shapes
@torch.jit.ignore
def _A(flat_idx, dims):
    """
    Convert a flat index into a multi-dimensional index for shape `dims`.

    Standard row-major unravelling: peel off remainders from the innermost
    dimension outwards, then reverse. Returns a tuple of per-dimension indices.
    """
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        # BUG FIX: the quotient was bound to a placeholder and never written
        # back, so every dimension saw the original flat index.
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
@torch.jit.ignore
# Computes a minimal list of slice-tuples that together cover the inclusive
# index range [start, end] over a tensor with batch shape `dims`.
# NOTE(review): degraded block — the five parameters were originally
# (start, end, dims, start_edges, end_edges) but share one placeholder name,
# and `a__` bindings are read back under their original names (path_list,
# divergence_idx, slices, middle_ground, ...). Annotated as-is.
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , ):
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(lowerCamelCase ) -> None:
        # Propagate "at the edge" flags inwards-out: a dimension only counts as
        # an edge if every inner dimension is also at its edge.
        a__ : int = True
        for i in range(len(lowerCamelCase ) ):
            a__ : Optional[Any] = -1 * (i + 1)
            l[reversed_idx] &= tally
            a__ : Tuple = l[reversed_idx]
    if start_edges is None:
        a__ : Optional[int] = [s == 0 for s in start]
        reduce_edge_list(lowerCamelCase )
    if end_edges is None:
        a__ : Union[str, Any] = [e == (d - 1) for e, d in zip(lowerCamelCase , lowerCamelCase )]
        reduce_edge_list(lowerCamelCase )
    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(lowerCamelCase ) == 0:
        return [()]
    elif len(lowerCamelCase ) == 1:
        return [(slice(start[0] , end[0] + 1 ),)]
    a__ : List[Tuple[slice, ...]] = []
    a__ : List[slice] = []
    # Dimensions common to start and end can be selected directly
    for s, e in zip(lowerCamelCase , lowerCamelCase ):
        if s == e:
            path_list.append(slice(lowerCamelCase , s + 1 ) )
        else:
            break
    a__ : Tuple[slice, ...] = tuple(lowerCamelCase )
    a__ : Optional[Any] = len(lowerCamelCase )
    # start == end, and we're done
    if divergence_idx == len(lowerCamelCase ):
        return [path]
    # Everything from `start` up to the top of its subtree at divergence_idx.
    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        a__ : Optional[Any] = start[divergence_idx]
        return tuple(
            path + (slice(lowerCamelCase , sdi + 1 ),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
    # Everything from the bottom of `end`'s subtree at divergence_idx to `end`.
    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        a__ : List[str] = end[divergence_idx]
        return tuple(
            path + (slice(lowerCamelCase , edi + 1 ),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
        slices.extend(lower() )
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper() )
        slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper() )
        a__ : Optional[int] = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
        slices.extend(lower() )
    return slices
@torch.jit.ignore
# Equivalent of t[flat_start:flat_end] over the flattened leading
# `no_batch_dims` batch dimensions, performed via explicit slices so the
# batch dimensions never need to be physically flattened.
# NOTE(review): degraded block — the parameters were originally
# (t, flat_start, flat_end, no_batch_dims); `a__` bindings are read back
# under their original names (batch_dims, start_idx, end_idx, slices,
# sliced_tensors). Annotated as-is.
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
    a__ : Optional[int] = t.shape[:no_batch_dims]
    a__ : List[str] = list(_flat_idx_to_idx(lowerCamelCase , lowerCamelCase ) )
    # _get_minimal_slice_set is inclusive
    a__ : Dict = list(_flat_idx_to_idx(flat_end - 1 , lowerCamelCase ) )
    # Get an ordered list of slices to perform
    a__ : str = _get_minimal_slice_set(
        lowerCamelCase , lowerCamelCase , lowerCamelCase , )
    a__ : Any = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
# Apply `layer` over `inputs` in chunks along the flattened batch dimensions
# to bound peak memory, reassembling the chunked outputs at the end.
# NOTE(review): degraded block — the parameters were originally
# (layer, inputs, chunk_size, no_batch_dims, low_mem=False, _out=None,
# _add_into_out=False), and `a__` bindings are read back under their original
# names (initial_dims, orig_batch_dims, prepped_inputs, prepped_outputs,
# flat_batch_dim, no_chunks, select_chunk, chunks, output_chunk, out, ...).
# Annotated as-is.
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = False , ):
    if not (len(lowerCamelCase ) > 0):
        raise ValueError("Must provide at least one input" )
    a__ : str = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCamelCase )]
    a__ : Dict = tuple([max(lowerCamelCase ) for s in zip(*lowerCamelCase )] )
    def _prep_inputs(lowerCamelCase ) -> torch.Tensor:
        # Broadcast each input to the common batch shape; unless in low-memory
        # mode, also flatten the batch dimensions into one.
        if not low_mem:
            if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
                a__ : Any = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
            a__ : Optional[Any] = t.reshape(-1 , *t.shape[no_batch_dims:] )
        else:
            a__ : Dict = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
        return t
    a__ : Dict[str, Any] = tensor_tree_map(_prep_inputs , lowerCamelCase )
    a__ : str = None
    if _out is not None:
        a__ : Optional[int] = tensor_tree_map(lambda lowerCamelCase : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
    a__ : Optional[Any] = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    # Ceiling division: number of chunks needed to cover the flat batch.
    a__ : Tuple = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
    def _select_chunk(lowerCamelCase ) -> torch.Tensor:
        # Size-1 batch tensors are broadcast, not sliced.
        return t[i : i + chunk_size] if t.shape[0] != 1 else t
    a__ : str = 0
    a__ : Any = prepped_outputs
    for _ in range(lowerCamelCase ):
        # Chunk the input
        if not low_mem:
            a__ : str = _select_chunk
        else:
            a__ : Tuple = partial(
                _chunk_slice , flat_start=lowerCamelCase , flat_end=min(lowerCamelCase , i + chunk_size ) , no_batch_dims=len(lowerCamelCase ) , )
        a__ : Dict[str, Any] = tensor_tree_map(lowerCamelCase , lowerCamelCase )
        # Run the layer on the chunk
        a__ : Any = layer(**lowerCamelCase )
        # Allocate space for the output
        if out is None:
            a__ : Optional[Any] = tensor_tree_map(lambda lowerCamelCase : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , lowerCamelCase )
        # Put the chunk in its pre-allocated space
        if isinstance(lowerCamelCase , lowerCamelCase ):
            def assign(lowerCamelCase , lowerCamelCase ) -> None:
                # Recursively copy (or accumulate) chunk outputs into `out`.
                for k, v in da.items():
                    if isinstance(lowerCamelCase , lowerCamelCase ):
                        assign(lowerCamelCase , da[k] )
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += da[k]
                        else:
                            a__ : Dict = da[k]
            assign(lowerCamelCase , lowerCamelCase )
        elif isinstance(lowerCamelCase , lowerCamelCase ):
            for xa, xa in zip(lowerCamelCase , lowerCamelCase ):
                if _add_into_out:
                    xa[i : i + chunk_size] += xa
                else:
                    a__ : Dict = xa
        elif isinstance(lowerCamelCase , torch.Tensor ):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                a__ : Dict = output_chunk
        else:
            raise ValueError("Not supported" )
        i += chunk_size
    # Restore the original (unflattened) batch dimensions on every output.
    a__ : Any = tensor_tree_map(lambda lowerCamelCase : t.view(orig_batch_dims + t.shape[1:] ) , lowerCamelCase )
    return out
class __lowerCAmelCase :
    # Chunk-size auto-tuner: binary-searches over power-of-two candidates for
    # the largest chunk size that executes without raising, and caches the
    # result keyed by the shapes/values of the arguments it was tuned with.
    # NOTE(review): degraded block — several method signatures reuse one
    # placeholder name for multiple parameters, and `a__` bindings are read
    # back under their original names (candidates, i, viable, consistent,
    # arg_data, ...). Annotated as-is.
    def __init__( self , snake_case = 512 , ) -> List[str]:
        """Record the maximum chunk size to consider; tuning state starts empty."""
        a__ : int = max_chunk_size
        a__ : Optional[int] = None
        a__ : Optional[tuple] = None
    def _snake_case ( self , snake_case , snake_case , snake_case ) -> int:
        """Search for the largest chunk size (from doubling candidates) for which `fn` succeeds."""
        logging.info("Tuning chunk size..." )
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        a__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
        a__ : List[str] = [c for c in candidates if c > min_chunk_size]
        a__ : Optional[int] = [min_chunk_size] + candidates
        candidates[-1] += 4
        def test_chunk_size(snake_case ) -> bool:
            # A candidate is viable iff fn runs without a RuntimeError (OOM).
            try:
                with torch.no_grad():
                    fn(*snake_case , chunk_size=snake_case )
                return True
            except RuntimeError:
                return False
        a__ : Union[str, Any] = 0
        a__ : Dict = len(snake_case ) - 1
        # Binary search between the largest known-viable and current candidate.
        while i > min_viable_chunk_size_index:
            a__ : Any = test_chunk_size(candidates[i] )
            if not viable:
                a__ : List[Any] = (min_viable_chunk_size_index + i) // 2
            else:
                a__ : Tuple = i
                a__ : Any = (i + len(snake_case ) - 1) // 2
        return candidates[min_viable_chunk_size_index]
    def _snake_case ( self , snake_case , snake_case ) -> bool:
        """Deep-compare two cached argument descriptions for equality."""
        a__ : str = True
        for aa, aa in zip(snake_case , snake_case ):
            assert type(snake_case ) == type(snake_case )
            if isinstance(snake_case , (list, tuple) ):
                consistent &= self._compare_arg_caches(snake_case , snake_case )
            elif isinstance(snake_case , snake_case ):
                # Dicts are compared on values sorted by key so ordering
                # differences don't invalidate the cache.
                a__ : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda snake_case : x[0] )]
                a__ : List[Any] = [v for _, v in sorted(aa.items() , key=lambda snake_case : x[0] )]
                consistent &= self._compare_arg_caches(snake_case , snake_case )
            else:
                consistent &= aa == aa
        return consistent
    def _snake_case ( self , snake_case , snake_case , snake_case , ) -> int:
        """Return a tuned chunk size, re-tuning only when the argument shapes changed."""
        a__ : List[Any] = True
        # Describe args by tensor shapes (tensors) or raw values (everything else).
        a__ : tuple = tree_map(lambda snake_case : a.shape if isinstance(snake_case , torch.Tensor ) else a , snake_case , snake_case )
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data ) == len(snake_case )
            a__ : Union[str, Any] = self._compare_arg_caches(self.cached_arg_data , snake_case )
        else:
            # Otherwise, we can reuse the precomputed value
            a__ : Optional[int] = False
        if not consistent:
            a__ : List[str] = self._determine_favorable_chunk_size(
                snake_case , snake_case , snake_case , )
            a__ : List[str] = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
| 629 | 1 |
from __future__ import annotations
from typing import Any
class __lowerCAmelCase :
    """
    A dense matrix addressed by (row, column) tuples, supporting addition,
    negation, subtraction, scalar and matrix multiplication, transposition,
    and the Sherman-Morrison rank-one inverse update.

    BUG FIX: the degraded version bound `__init__` results to placeholders
    instead of `self` attributes, referenced an undefined name `Matrix` inside
    methods, and renamed `validate_indicies`/`transpose`/`sherman_morrison`
    to a single shadowing `_snake_case` even though call sites (including the
    module's own test harness) use the original names. All restored here;
    `type(self)` is used instead of the class name because the class's
    double-underscore name would be mangled inside method bodies.
    """

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        """Create a `row` x `column` matrix with every cell set to `default_value`."""
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        """Render the matrix with right-aligned, equal-width cells."""
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # The widest cell (as text) determines the shared column width.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc) -> bool:
        """Return True when `loc` is a 2-item (row, column) index inside the matrix."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc):
        """Return the cell at (row, column) index `loc`."""
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc, value) -> None:
        """Set the cell at (row, column) index `loc` to `value`."""
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another):
        """Element-wise sum; shapes must match."""
        assert isinstance(another, type(self))
        assert self.row == another.row and self.column == another.column
        result = type(self)(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        """Element-wise negation."""
        result = type(self)(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        return self + (-another)

    def __mul__(self, another):
        """Scalar multiplication for int/float operands, matrix product otherwise."""
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = type(self)(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, type(self)):  # Matrix multiplication
            assert self.column == another.row
            result = type(self)(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self):
        """Return a new matrix with rows and columns swapped."""
        result = type(self)(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        """
        Treat this matrix as A^{-1} and, for column vectors u and v, return
        (A + u v^T)^{-1} via the Sherman-Morrison formula, or None when the
        update is not invertible (1 + v^T A^{-1} u == 0).
        """
        assert isinstance(u, type(self)) and isinstance(v, type(self))
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def _A ( ):
        # Sherman-Morrison smoke test: build an identity a^(-1), two column
        # vectors u and v, and print (a + uv^T)^(-1).
        # NOTE(review): degraded — `Matrix` is undefined here (the class above
        # was renamed), results are bound to `a__` but read back as
        # `ainv`/`u`/`v`, the loop was meant to set ainv[i, i] = 1, and both
        # functions in this guard share the name `_A`, so the first is
        # shadowed and `testa()` below cannot resolve it.
        # a^(-1)
        a__ : Union[str, Any] = Matrix(3 , 3 , 0 )
        for i in range(3 ):
            a__ : Any = 1
        print(F"""a^(-1) is {ainv}""" )
        # u, v
        a__ : Tuple = Matrix(3 , 1 , 0 )
        a__ , a__ , a__ : Optional[Any] = 1, 2, -3
        a__ : str = Matrix(3 , 1 , 0 )
        a__ , a__ , a__ : List[Any] = 4, -2, 5
        print(F"""u is {u}""" )
        print(F"""v is {v}""" )
        print(F"""uv^T is {u * v.transpose()}""" )
        # Sherman Morrison
        print(F"""(a + uv^T)^(-1) is {ainv.sherman_morrison(lowerCamelCase , lowerCamelCase )}""" )
    def _A ( ):
        # Run the module doctests, then the smoke test above.
        import doctest
        doctest.testmod()
    testa()
| 629 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
class __lowerCAmelCase ( _UpperCamelCase ):
    """
    Configuration for a UPerNet semantic-segmentation model: a nested backbone
    configuration plus the decode-head and auxiliary-head hyperparameters.

    BUG FIX: the degraded version gave every parameter the same placeholder
    name (a SyntaxError), never assigned the values to `self` even though
    `to_dict` reads them back from `self.__dict__`, and lost the `model_type`
    class attribute that `to_dict` references.
    """

    model_type = """upernet"""

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],  # kept mutable for interface parity with callers
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        if backbone_config is None:
            # Fall back to a default ResNet backbone when none is provided.
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            # Rehydrate a serialized backbone config through its registered class.
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serialize this config to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 629 | 1 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
# Metric documentation constants.
# BUG FIX: all three strings were assigned to the same degraded name
# `SCREAMING_SNAKE_CASE__` (each overwriting the last), while the decorator
# and MetricInfo below reference `_CITATION`, `_DESCRIPTION` and
# `_KWARGS_DESCRIPTION`. Restore the original names; string contents unchanged.
_CITATION = """\
@misc{wu2016googles,
      title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
      author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
              and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
              Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
              Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
              Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
              and Jeffrey Dean},
      year={2016},
      eprint={1609.08144},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
"""

_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""

_KWARGS_DESCRIPTION = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.

Args:
    predictions (list of str): list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references (list of list of str): list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.

Returns:
    'google_bleu': google_bleu score

Examples:
    Example 1:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric(\"google_bleu\")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results[\"google_bleu\"], 2))
        0.44

    Example 2:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric(\"google_bleu\")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results[\"google_bleu\"], 2))
        0.61

    Example 3:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric(\"google_bleu\")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
        >>> print(round(results[\"google_bleu\"], 2))
        0.53

    Example 4:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric(\"google_bleu\")
        >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
        >>> print(round(results[\"google_bleu\"], 2))
        0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __lowerCAmelCase ( datasets.Metric ):
    """Corpus-level Google BLEU (GLEU) metric backed by NLTK's gleu_score.

    BUG FIX: both methods were degraded to the same name `_snake_case` (the
    second silently shadowing the first) with duplicate placeholder parameter
    names; `datasets.Metric` dispatches to `_info` and `_compute`, restored
    here.
    """

    def _info(self) -> MetricInfo:
        """Declare metric metadata and the expected tokenized input schema."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(self, predictions, references, min_len=1, max_len=4) -> Dict[str, float]:
        """Compute corpus GLEU over tokenized predictions against their references."""
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references,
                hypotheses=predictions,
                min_len=min_len,
                max_len=max_len,
            )
        }
| 629 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
# Resampling filters for image resizing, keyed by a lowercase name.
# Pillow >= 9.1 moved the filter constants into the PIL.Image.Resampling enum
# and deprecated the module-level constants, so pick the table by version.
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
    SCREAMING_SNAKE_CASE__ : int = {
        """linear""": PIL.Image.Resampling.BILINEAR,
        """bilinear""": PIL.Image.Resampling.BILINEAR,
        """bicubic""": PIL.Image.Resampling.BICUBIC,
        """lanczos""": PIL.Image.Resampling.LANCZOS,
        """nearest""": PIL.Image.Resampling.NEAREST,
    }
else:
    SCREAMING_SNAKE_CASE__ : Dict = {
        """linear""": PIL.Image.LINEAR,
        """bilinear""": PIL.Image.BILINEAR,
        """bicubic""": PIL.Image.BICUBIC,
        """lanczos""": PIL.Image.LANCZOS,
        """nearest""": PIL.Image.NEAREST,
    }
# Map a torch image batch from [-1, 1] to [0, 1], move it to channels-last
# numpy on CPU, and hand it to the numpy->PIL converter.
# NOTE(review): degraded block — each result is bound to `a__` while the next
# line reads `images`, and `numpy_to_pil` is not defined in this module (the
# sibling function below was its original definition before being renamed).
# Restore the original bindings before running; annotated as-is.
def _A ( lowerCamelCase ):
    a__ : Optional[int] = (images / 2 + 0.5).clamp(0 , 1 )
    a__ : List[Any] = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    a__ : int = numpy_to_pil(lowerCamelCase )
    return images
def _A(images):
    """
    Convert a numpy image batch to a list of PIL images.

    `images` is NHWC (or a single HWC image, which is promoted to a batch of
    one) with float values in [0, 1]. Single-channel images are emitted as
    grayscale ("L") PIL images.

    BUG FIX: the degraded version bound the promoted batch and the uint8
    conversion to placeholders instead of `images`, so Image.fromarray
    received the raw float array.
    """
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
| 629 | 1 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class __lowerCAmelCase ( _UpperCamelCase ):
    """Dataset of token-id sequences (distillation-style), cleaned in place.

    NOTE(review): throughout this class, results are assigned to the throwaway
    name ``a__`` while later statements read the originally-named locals
    (``params``, ``data``, ``new_tok_ids``, ``indices``, ``tk_`` ...); those
    reads are unresolved as written. Code is kept byte-identical here — only
    documentation was added. Restore the real binding names to make it run.
    """
    def __init__( self , snake_case , snake_case ) -> List[Any]:
        """Store params and the token-id sequences, then run the cleaning pipeline."""
        a__ : Union[str, Any] = params
        a__ : List[str] = np.array(snake_case )
        # Cache per-sequence lengths so filtering can be done with boolean masks.
        a__ : Dict = np.array([len(snake_case ) for t in data] )
        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()
    def __getitem__( self , snake_case ) -> int:
        """Return the pair (token_ids, length) for one sequence."""
        return (self.token_ids[index], self.lengths[index])
    def __len__( self ) -> Optional[int]:
        """Number of sequences currently held."""
        return len(self.lengths )
    def _snake_case ( self ) -> Dict:
        """Sanity-check that token arrays and cached lengths agree."""
        assert len(self.token_ids ) == len(self.lengths )
        assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
    def _snake_case ( self ) -> Union[str, Any]:
        """Split sequences longer than the model's max input size into chunks,
        re-adding the boundary special tokens on each chunk."""
        a__ : Optional[Any] = self.params.max_model_input_size
        a__ : Tuple = self.lengths > max_len
        logger.info(F"""Splitting {sum(snake_case )} too long sequences.""" )
        def divide_chunks(snake_case , snake_case ):
            # Slice a sequence into consecutive chunks of at most n tokens.
            return [l[i : i + n] for i in range(0 , len(snake_case ) , snake_case )]
        a__ : Any = []
        a__ : List[Any] = []
        if self.params.mlm:
            # MLM-style models bracket sequences with [CLS] ... [SEP].
            a__ , a__ : Any = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            # CLM-style models bracket sequences with <bos> ... <eos>.
            a__ , a__ : Any = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]
        for seq_, len_ in zip(self.token_ids , self.lengths ):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_ )
                new_lengths.append(len_ )
            else:
                a__ : List[str] = []
                # max_len - 2 leaves room for the boundary tokens added below.
                for sub_s in divide_chunks(seq_ , max_len - 2 ):
                    if sub_s[0] != cls_id:
                        a__ : Union[str, Any] = np.insert(snake_case , 0 , snake_case )
                    if sub_s[-1] != sep_id:
                        a__ : Tuple = np.insert(snake_case , len(snake_case ) , snake_case )
                    assert len(snake_case ) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(snake_case )
                new_tok_ids.extend(snake_case )
                new_lengths.extend([len(snake_case ) for l in sub_seqs] )
        a__ : int = np.array(snake_case )
        a__ : List[Any] = np.array(snake_case )
    def _snake_case ( self ) -> int:
        """Drop sequences of 11 tokens or fewer (too short to be useful)."""
        a__ : List[str] = len(self )
        a__ : Optional[int] = self.lengths > 11
        a__ : List[Any] = self.token_ids[indices]
        a__ : str = self.lengths[indices]
        a__ : Optional[Any] = len(self )
        logger.info(F"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" )
    def _snake_case ( self ) -> Dict:
        """Drop sequences where 50% or more of the tokens are the unknown token."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            a__ : int = self.params.special_tok_ids["unk_token"]
        a__ : List[str] = len(self )
        a__ : Dict = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
        a__ : str = (unk_occs / self.lengths) < 0.5
        a__ : Any = self.token_ids[indices]
        a__ : Dict = self.lengths[indices]
        a__ : Union[str, Any] = len(self )
        logger.info(F"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" )
    def _snake_case ( self ) -> str:
        """Log dataset statistics (master process only)."""
        if not self.params.is_master:
            return
        logger.info(F"""{len(self )} sequences""" )
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def _snake_case ( self , snake_case ) -> Any:
        """Collate a list of (token_ids, length) pairs into padded tensors."""
        a__ : Union[str, Any] = [t[0] for t in batch]
        a__ : List[str] = [t[1] for t in batch]
        assert len(snake_case ) == len(snake_case )
        # Max for paddings
        a__ : Any = max(snake_case )
        # Pad token ids
        if self.params.mlm:
            a__ : Optional[int] = self.params.special_tok_ids["pad_token"]
        else:
            a__ : List[str] = self.params.special_tok_ids["unk_token"]
        a__ : Any = [list(t.astype(snake_case ) ) + [pad_idx] * (max_seq_len_ - len(snake_case )) for t in token_ids]
        assert len(tk_ ) == len(snake_case )
        assert all(len(snake_case ) == max_seq_len_ for t in tk_ )
        a__ : Optional[int] = torch.tensor(tk_ ) # (bs, max_seq_len_)
        a__ : Optional[int] = torch.tensor(snake_case ) # (bs)
        return tk_t, lg_t
| 629 |
# Lint as: python3
import itertools
import os
import re
SCREAMING_SNAKE_CASE__ : Tuple = re.compile(R"""([A-Z]+)([A-Z][a-z])""")
SCREAMING_SNAKE_CASE__ : List[str] = re.compile(R"""([a-z\d])([A-Z])""")
SCREAMING_SNAKE_CASE__ : Tuple = re.compile(R"""(?<!_)_(?!_)""")
SCREAMING_SNAKE_CASE__ : Dict = re.compile(R"""(_{2,})""")
SCREAMING_SNAKE_CASE__ : List[Any] = R"""^\w+(\.\w+)*$"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = R"""<>:/\|?*"""
def _A ( lowerCamelCase ):
    """Convert a CamelCase name to snake_case (e.g. ``SquadV2`` -> ``squad_v2``).

    Fix: the previous body assigned each substitution to a throwaway name and
    then read the undefined local ``name``; the chain is restored.
    """
    name = _uppercase_uppercase_re.sub(r"\1_\2" , lowerCamelCase )
    name = _lowercase_uppercase_re.sub(r"\1_\2" , name )
    return name.lower()
def _A ( lowerCamelCase ):
    """Convert a snake_case name to CamelCase, preserving double-underscore runs.

    Fix: the previous body bound results to a throwaway name, re-split the raw
    input in the comprehension, and read the undefined local ``name``.
    """
    name = _single_underscore_re.split(lowerCamelCase )
    name = [_multiple_underscores_re.split(n ) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name ) if n != "" )
def _A ( lowerCamelCase ):
    """Return the snake_case file prefix for a dataset name (must not be a path).

    Fix: the previous body compared against (and formatted) the undefined
    local ``name`` instead of its parameter.
    NOTE(review): ``camelcase_to_snakecase`` must resolve to the helper defined
    above — confirm the binding name in this module.
    """
    if os.path.basename(lowerCamelCase ) != lowerCamelCase:
        raise ValueError(F"""Should be a dataset name, not a path: {lowerCamelCase}""" )
    return camelcase_to_snakecase(lowerCamelCase )
def _A ( name , split ):
    """Return ``<snake_case_name>-<split>`` after validating both arguments.

    Fix: the previous signature declared ``lowerCamelCase`` twice, which is a
    SyntaxError in Python; parameter names are restored from the body, which
    already read ``name`` and ``split``.
    """
    if os.path.basename(name ) != name:
        raise ValueError(F"""Should be a dataset name, not a path: {name}""" )
    if not re.match(_split_re , split ):
        raise ValueError(F"""Split name should match '{_split_re}'' but got '{split}'.""" )
    return F"""{filename_prefix_for_name(name )}-{split}"""
def _A ( dataset_name , split , data_dir , filetype_suffix=None ):
    """Return a glob pattern ``<data_dir>/<prefix>[.<suffix>]*`` for a split.

    Fix: the previous signature repeated ``lowerCamelCase`` (a SyntaxError)
    and the body read the undefined locals ``prefix`` and ``filepath``.
    """
    prefix = filename_prefix_for_split(dataset_name , split )
    if filetype_suffix:
        prefix += F""".{filetype_suffix}"""
    filepath = os.path.join(data_dir , prefix )
    return F"""{filepath}*"""
def _A ( path , dataset_name , split , filetype_suffix=None , shard_lengths=None ):
    """Return the list of on-disk filenames for a dataset split.

    With ``shard_lengths`` given, one ``-NNNNN-of-NNNNN``-suffixed name per
    shard is produced; otherwise a single-element list.

    Fix: the previous signature repeated ``lowerCamelCase`` five times (a
    SyntaxError) and bound every intermediate to a throwaway name while the
    body read ``prefix``/``filenames``/``filename``.
    """
    prefix = filename_prefix_for_split(dataset_name , split )
    prefix = os.path.join(path , prefix )
    if shard_lengths:
        num_shards = len(shard_lengths )
        filenames = [F"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(num_shards )]
        if filetype_suffix:
            filenames = [filename + F""".{filetype_suffix}""" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += F""".{filetype_suffix}"""
        return [filename]
| 629 | 1 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def _A ( ):
    """Regression test for Kruskal's MST on a 9-node weighted graph.

    Fix: the previous body bound every value to the throwaway name ``a__`` and
    then called ``kruskal`` with the undefined name ``lowerCamelCase`` (the
    function takes no parameters); bindings restored from the upstream test.
    The comparison is order-insensitive since edge order is unspecified.
    """
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes , edges )
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(result ) == sorted(expected )
| 629 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the SEW model (standard transformers pattern).
# Fix: the import map was assigned to a throwaway name while `_LazyModule`
# received the undefined `_import_structure`, and the lazy module was never
# installed into `sys.modules`.
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: expose only the configuration objects.
    pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 629 | 1 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def _A ( lowerCamelCase ): # picklable for multiprocessing
return x.sum()
def _A ( lowerCamelCase ): # picklable for multiprocessing
return i + 1
@dataclass
class __lowerCAmelCase :
_UpperCamelCase : int
_UpperCamelCase : str
class __lowerCAmelCase ( _UpperCamelCase ):
    """Unit tests for datasets' py_utils helpers (map_nested, zip_dict, ...).

    NOTE(review): the bodies below bind fixtures to the throwaway name ``a__``
    and pass the undefined ``snake_case`` to the assertions; the original
    fixture names were lost. Code kept byte-identical — documentation only.
    """
    def _snake_case ( self ) -> List[str]:
        """map_nested applies a function through dicts/lists/scalars, serially
        and with num_proc, and optionally maps over numpy arrays."""
        a__ : Tuple = {}
        a__ : Any = []
        a__ : Optional[int] = 1
        a__ : Any = [1, 2]
        a__ : str = {"a": 1, "b": 2}
        a__ : List[Any] = {"a": [1, 2], "b": [3, 4]}
        a__ : List[str] = {"a": {"1": 1}, "b": 2}
        a__ : Union[str, Any] = {"a": 1, "b": 2, "c": 3, "d": 4}
        a__ : Tuple = {}
        a__ : str = []
        a__ : str = 2
        a__ : List[str] = [2, 3]
        a__ : Union[str, Any] = {"a": 2, "b": 3}
        a__ : Tuple = {"a": [2, 3], "b": [4, 5]}
        a__ : Optional[int] = {"a": {"1": 2}, "b": 3}
        a__ : Dict = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(snake_case , snake_case ) , snake_case )
        self.assertEqual(map_nested(snake_case , snake_case ) , snake_case )
        self.assertEqual(map_nested(snake_case , snake_case ) , snake_case )
        self.assertEqual(map_nested(snake_case , snake_case ) , snake_case )
        self.assertEqual(map_nested(snake_case , snake_case ) , snake_case )
        self.assertEqual(map_nested(snake_case , snake_case ) , snake_case )
        self.assertEqual(map_nested(snake_case , snake_case ) , snake_case )
        self.assertEqual(map_nested(snake_case , snake_case ) , snake_case )
        a__ : Dict = 2
        # Same expectations when the work is spread over two processes.
        self.assertEqual(map_nested(snake_case , snake_case , num_proc=snake_case ) , snake_case )
        self.assertEqual(map_nested(snake_case , snake_case , num_proc=snake_case ) , snake_case )
        self.assertEqual(map_nested(snake_case , snake_case , num_proc=snake_case ) , snake_case )
        self.assertEqual(map_nested(snake_case , snake_case , num_proc=snake_case ) , snake_case )
        self.assertEqual(map_nested(snake_case , snake_case , num_proc=snake_case ) , snake_case )
        self.assertEqual(map_nested(snake_case , snake_case , num_proc=snake_case ) , snake_case )
        self.assertEqual(map_nested(snake_case , snake_case , num_proc=snake_case ) , snake_case )
        self.assertEqual(map_nested(snake_case , snake_case , num_proc=snake_case ) , snake_case )
        a__ : Any = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
        a__ : Any = {"a": 2, "b": 0, "c": 2}
        a__ : Optional[int] = {
            "a": np.eye(2 ).astype(snake_case ),
            "b": np.zeros(3 ).astype(snake_case ),
            "c": np.ones(2 ).astype(snake_case ),
        }
        self.assertEqual(map_nested(snake_case , snake_case , map_numpy=snake_case ) , snake_case )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(snake_case , snake_case , map_numpy=snake_case ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        self.assertEqual(map_nested(snake_case , snake_case , map_numpy=snake_case , num_proc=snake_case ) , snake_case )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(snake_case , snake_case , map_numpy=snake_case , num_proc=snake_case ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        with self.assertRaises(snake_case ): # can't pickle a local lambda
            map_nested(lambda snake_case : x + 1 , snake_case , num_proc=snake_case )
    def _snake_case ( self ) -> List[Any]:
        """zip_dict pairs values of several dicts key-by-key."""
        a__ : str = {"a": 1, "b": 2}
        a__ : Optional[Any] = {"a": 3, "b": 4}
        a__ : Dict = {"a": 5, "b": 6}
        a__ : int = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(snake_case , snake_case , snake_case ) ) , snake_case )
    def _snake_case ( self ) -> Dict:
        """temporary_assignment swaps an attribute within the context and
        restores the original value on exit."""
        class __lowerCAmelCase :
            # NOTE(review): attribute name obfuscated; the assertions below
            # read ``my_attr`` on an instance of ``Foo``.
            _UpperCamelCase : Union[str, Any] = """bar"""
        a__ : int = Foo()
        self.assertEqual(foo.my_attr , "bar" )
        with temporary_assignment(snake_case , "my_attr" , "BAR" ):
            self.assertEqual(foo.my_attr , "BAR" )
        self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
    "iterable_length, num_proc, expected_num_proc" , [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ] , )
def _A ( iterable_length , num_proc , expected_num_proc ):
    """map_nested falls back to a single process below parallel_min_length.

    Fix: the previous signature declared ``lowerCamelCase`` three times — a
    SyntaxError — and pytest requires the parameter names to match the
    ``parametrize`` argnames, so the upstream names are restored.
    """
    with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
        data_struct = {F"""{i}""": i for i in range(iterable_length )}
        _ = map_nested(lambda x : x + 10 , data_struct , num_proc=num_proc , parallel_min_length=16 )
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            # The pool must be sized to the effective process count.
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class __lowerCAmelCase ( _UpperCamelCase ):
    """Tests for temp_seed: seeding twice yields identical random outputs,
    while an unseeded draw differs.

    NOTE(review): results are bound to the throwaway name ``a__`` while the
    final assertions read ``outa`` (originally distinct ``out1``/``out3``
    locals); kept byte-identical — documentation only.
    """
    @require_tf
    def _snake_case ( self ) -> str:
        """TensorFlow: same seed => equal outputs; fresh draw differs."""
        import tensorflow as tf
        from tensorflow.keras import layers
        a__ : Any = layers.Dense(2 )
        def gen_random_output():
            # Draws a random input and runs it through the dense layer.
            a__ : List[Any] = tf.random.uniform((1, 3) )
            return model(snake_case ).numpy()
        with temp_seed(42 , set_tensorflow=snake_case ):
            a__ : List[Any] = gen_random_output()
        with temp_seed(42 , set_tensorflow=snake_case ):
            a__ : List[Any] = gen_random_output()
        a__ : Optional[Any] = gen_random_output()
        np.testing.assert_equal(snake_case , snake_case )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )
    @require_torch
    def _snake_case ( self ) -> Dict:
        """PyTorch: same seed => equal outputs; fresh draw differs."""
        import torch
        def gen_random_output():
            a__ : int = torch.nn.Linear(3 , 2 )
            a__ : Optional[Any] = torch.rand(1 , 3 )
            return model(snake_case ).detach().numpy()
        with temp_seed(42 , set_pytorch=snake_case ):
            a__ : Tuple = gen_random_output()
        with temp_seed(42 , set_pytorch=snake_case ):
            a__ : Dict = gen_random_output()
        a__ : Optional[Any] = gen_random_output()
        np.testing.assert_equal(snake_case , snake_case )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )
    def _snake_case ( self ) -> Optional[int]:
        """NumPy: same seed => equal outputs; fresh draw differs."""
        def gen_random_output():
            return np.random.rand(1 , 3 )
        with temp_seed(42 ):
            a__ : Union[str, Any] = gen_random_output()
        with temp_seed(42 ):
            a__ : Optional[int] = gen_random_output()
        a__ : List[Any] = gen_random_output()
        np.testing.assert_equal(snake_case , snake_case )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data" , [{}] )
def _A ( input_data ):
    """NestedDataStructure.data returns the wrapped object unchanged.

    Fix: the parameter is renamed to ``input_data`` so pytest can match the
    ``parametrize`` argname, and the body's undefined ``output_data``/
    ``input_data`` reads are resolved by binding them properly.
    """
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
    "data, expected_output" , [
        ({}, []),
        ([], []),
        ("foo", ["foo"]),
        (["foo", "bar"], ["foo", "bar"]),
        ([["foo", "bar"]], ["foo", "bar"]),
        ([[["foo"], ["bar"]]], ["foo", "bar"]),
        ([[["foo"], "bar"]], ["foo", "bar"]),
        ({"a": 1, "b": 2}, [1, 2]),
        ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
        ({"a": {"1": 1}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": [2]}, [1, 2]),
    ] , )
def _A ( data , expected_output ):
    """NestedDataStructure.flatten yields all leaves, depth-first.

    Fix: the previous signature declared ``lowerCamelCase`` twice (a
    SyntaxError); names restored to match the ``parametrize`` argnames.
    """
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def _A ( ):
    """asdict recursively converts dataclass instances nested in dicts/lists,
    and raises for a top-level object that is not a dataclass.

    Fix: the previous body bound fixtures to throwaway names and then passed
    the undefined ``lowerCamelCase`` to ``asdict``; bindings restored.
    NOTE(review): relies on the two-field dataclass (upstream named ``A``)
    and the ``asdict`` helper being in scope in this module.
    """
    input_ = A(x=1 , y="foobar" )
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input_ ) == expected_output
    input_ = {"a": {"b": A(x=10 , y="foo" )}, "c": [A(x=20 , y="bar" )]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input_ ) == expected_output
    with pytest.raises(TypeError ):
        asdict([1, A(x=10 , y="foo" )] )
def _A ( lowerCamelCase ):
return text.split()
def _A ( lowerCamelCase ):
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def _A ( ):
    """iflatmap_unordered flattens worker results and yields them eagerly.

    Fix: the pool handle was previously passed as the undefined name
    ``lowerCamelCase`` and collected items were appended via the same
    undefined name; restored to the ``pool``/``content`` locals bound by the
    ``with``/``for`` statements.
    NOTE(review): ``_split_text`` and the timing generator are the sibling
    helpers defined above — confirm their binding names in this module.
    """
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(out ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(out ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(content )
    assert out.count("a" ) == 2
    assert out.count("b" ) == 2
    assert len(out ) == 4
| 629 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import scaffolding for the CPM-Ant model (standard transformers pattern).
# Fix: the import map was assigned to a throwaway name while `_LazyModule`
# received the undefined `_import_structure`, and the lazy module was never
# installed into `sys.modules`.
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: expose only configuration and tokenizer objects.
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 629 | 1 |
from __future__ import annotations
from typing import TypedDict
class __lowerCAmelCase ( _UpperCamelCase ):
_UpperCamelCase : str
_UpperCamelCase : int
def _A ( lowerCamelCase ):
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise TypeError("The parameter s type must be str." )
return [s[i:] + s[:i] for i in range(len(lowerCamelCase ) )]
def _A ( lowerCamelCase ):
    """Compute the Burrows-Wheeler transform of a non-empty string.

    Returns a dict with the transformed string (last column of the sorted
    rotation table) and the index of the original string in that table.

    Fix: the type check called ``isinstance(x, x)`` and every intermediate was
    bound to a throwaway name; bindings restored.
    NOTE(review): ``all_rotations`` must resolve to the helper defined above —
    confirm the binding name in this module.
    """
    if not isinstance(lowerCamelCase , str ):
        raise TypeError("The parameter s type must be str." )
    if not lowerCamelCase:
        raise ValueError("The parameter s must not be empty." )
    rotations = all_rotations(lowerCamelCase )
    rotations.sort() # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(lowerCamelCase ),
    }
    return response
def _A ( lowerCamelCase , lowerCamelCase ):
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise TypeError("The parameter bwt_string type must be str." )
if not bwt_string:
raise ValueError("The parameter bwt_string must not be empty." )
try:
a__ : str = int(lowerCamelCase )
except ValueError:
raise TypeError(
"The parameter idx_original_string type must be int or passive"
" of cast to int." )
if idx_original_string < 0:
raise ValueError("The parameter idx_original_string must not be lower than 0." )
if idx_original_string >= len(lowerCamelCase ):
raise ValueError(
"The parameter idx_original_string must be lower than" " len(bwt_string)." )
a__ : List[str] = [""] * len(lowerCamelCase )
for _ in range(len(lowerCamelCase ) ):
for i in range(len(lowerCamelCase ) ):
a__ : Union[str, Any] = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    # Interactive demo: transform a user-supplied string and invert it again.
    # Fix: every binding was previously assigned to a throwaway name while the
    # f-strings read the original locals (``s``, ``result``, ...), which were
    # undefined; bindings restored.
    # NOTE(review): ``bwt_transform``/``reverse_bwt`` must resolve to the
    # helpers defined above — confirm their binding names in this module.
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f'Burrows Wheeler transform for string \'{s}\' results '
        f'in \'{result["bwt_string"]}\''
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '
        f'we get original string \'{original_string}\''
    )
| 629 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
# Reference passage fed to the text-question-answering tool in the tests below.
SCREAMING_SNAKE_CASE__ : Dict = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class __lowerCAmelCase ( unittest.TestCase ,_UpperCamelCase ):
    """Tests for the text-question-answering tool, local and remote.

    NOTE(review): the setup method binds the loaded tools to the throwaway
    name ``a__`` while the tests read ``self.tool``/``self.remote_tool``, and
    the calls pass the undefined ``snake_case`` where the reference passage
    (the module-level text constant) is expected. Kept byte-identical —
    documentation only.
    """
    def _snake_case ( self ) -> str:
        """Load the local tool, run its setup, and load the remote variant."""
        a__ : Optional[int] = load_tool("text-question-answering" )
        self.tool.setup()
        a__ : Dict = load_tool("text-question-answering" , remote=snake_case )
    def _snake_case ( self ) -> Dict:
        """Local tool, positional arguments."""
        a__ : Optional[Any] = self.tool(snake_case , "What did Hugging Face do in April 2021?" )
        self.assertEqual(snake_case , "launched the BigScience Research Workshop" )
    def _snake_case ( self ) -> Tuple:
        """Remote tool, positional arguments."""
        a__ : List[Any] = self.remote_tool(snake_case , "What did Hugging Face do in April 2021?" )
        self.assertEqual(snake_case , "launched the BigScience Research Workshop" )
    def _snake_case ( self ) -> Any:
        """Local tool, keyword arguments."""
        a__ : Any = self.tool(text=snake_case , question="What did Hugging Face do in April 2021?" )
        self.assertEqual(snake_case , "launched the BigScience Research Workshop" )
    def _snake_case ( self ) -> int:
        """Remote tool, keyword arguments."""
        a__ : List[str] = self.remote_tool(text=snake_case , question="What did Hugging Face do in April 2021?" )
        self.assertEqual(snake_case , "launched the BigScience Research Workshop" )
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    """Placeholder that raises a helpful error when the 'speech' backend
    (torchaudio et al.) is not installed; instantiation triggers the check."""
    # Backends required for the real class this placeholder stands in for.
    _UpperCamelCase : Dict = ["""speech"""]
    def __init__( self , *snake_case , **snake_case ) -> Tuple:
        """Raise an ImportError-style message if the speech backend is missing."""
        requires_backends(self , ["speech"] )
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    """Second placeholder for a speech-backend-gated class; identical guard:
    instantiation raises unless the 'speech' backend is installed."""
    # Backends required for the real class this placeholder stands in for.
    _UpperCamelCase : Dict = ["""speech"""]
    def __init__( self , *snake_case , **snake_case ) -> Tuple:
        """Raise an ImportError-style message if the speech backend is missing."""
        requires_backends(self , ["speech"] )
| 629 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __lowerCAmelCase ( _UpperCamelCase ):
    """Offline-mode tests: each test builds a small Python script as strings
    (load / run / optional socket mock), executes it in a subprocess with a
    controlled environment, and asserts on returncode and output.

    NOTE(review): the bodies bind the script fragments and subprocess results
    to the throwaway name ``a__`` while later statements read ``load``,
    ``run``, ``mock``, ``env`` and ``result``; those reads are unresolved as
    written. Kept byte-identical — documentation only.
    """
    @require_torch
    def _snake_case ( self ) -> str:
        """TRANSFORMERS_OFFLINE=1 with a hard-failing socket: cached files
        must still load successfully."""
        a__ : Tuple = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        a__ : str = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        a__ : Optional[Any] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "
        # Force fetching the files so that we can use the cache
        a__ : Tuple = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(snake_case )
        BertModel.from_pretrained(snake_case )
        BertTokenizer.from_pretrained(snake_case )
        pipeline(task="fill-mask" , model=snake_case )
        # baseline - just load from_pretrained with normal network
        a__ : Optional[int] = [sys.executable, "-c", "\n".join([load, run, mock] )]
        # should succeed
        a__ : Dict = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        a__ : Tuple = "1"
        a__ : List[Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
    @require_torch
    def _snake_case ( self ) -> List[Any]:
        """Flaky-internet socket mock: cached files must still load without
        any offline flag set."""
        a__ : Tuple = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        a__ : Optional[Any] = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        a__ : Tuple = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
        # Force fetching the files so that we can use the cache
        a__ : List[Any] = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(snake_case )
        BertModel.from_pretrained(snake_case )
        BertTokenizer.from_pretrained(snake_case )
        pipeline(task="fill-mask" , model=snake_case )
        # baseline - just load from_pretrained with normal network
        a__ : Tuple = [sys.executable, "-c", "\n".join([load, run, mock] )]
        # should succeed
        a__ : Any = self.get_env()
        a__ : int = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
    @require_torch
    def _snake_case ( self ) -> Tuple:
        """Sharded checkpoint: loads with network, then again offline."""
        a__ : List[Any] = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
        a__ : Dict = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
        a__ : int = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        # baseline - just load from_pretrained with normal network
        a__ : List[Any] = [sys.executable, "-c", "\n".join([load, run] )]
        # should succeed
        a__ : str = self.get_env()
        a__ : List[str] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
        # next emulate no network
        a__ : List[Any] = [sys.executable, "-c", "\n".join([load, mock, run] )]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        a__ : Union[str, Any] = "1"
        a__ : Tuple = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
    @require_torch
    def _snake_case ( self ) -> Tuple:
        """Pipeline without an explicit task cannot infer it offline: the
        subprocess must fail with a clear error message."""
        a__ : Optional[Any] = "\nfrom transformers import pipeline\n "
        a__ : int = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
        a__ : Dict = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        a__ : List[str] = self.get_env()
        a__ : Union[str, Any] = "1"
        a__ : List[str] = [sys.executable, "-c", "\n".join([load, mock, run] )]
        a__ : Any = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 1 , result.stderr )
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , )
    @require_torch
    def _snake_case ( self ) -> Union[str, Any]:
        """trust_remote_code model: loads with network, then again offline."""
        a__ : Any = "\nfrom transformers import AutoModel\n "
        a__ : Any = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
        # baseline - just load from_pretrained with normal network
        a__ : List[str] = [sys.executable, "-c", "\n".join([load, run] )]
        # should succeed
        a__ : Optional[Any] = self.get_env()
        a__ : Union[str, Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        a__ : Dict = "1"
        a__ : Union[str, Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
| 629 | 1 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( _UpperCamelCase ):
_UpperCamelCase : int = (DPMSolverSinglestepScheduler,)
_UpperCamelCase : List[str] = (("""num_inference_steps""", 25),)
def _snake_case ( self , **snake_case ) -> Union[str, Any]:
"""simple docstring"""
a__ : Union[str, Any] = {
"num_train_timesteps": 1_000,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"prediction_type": "epsilon",
"thresholding": False,
"sample_max_value": 1.0,
"algorithm_type": "dpmsolver++",
"solver_type": "midpoint",
"lambda_min_clipped": -float("inf" ),
"variance_type": None,
}
config.update(**snake_case )
return config
    def _snake_case ( self , snake_case=0 , **snake_case ) -> List[str]:
        """Save a scheduler config to a temp dir, reload it, and check that the
        original and reloaded schedulers produce identical step outputs.

        NOTE(review): locals are bound to the throwaway name ``a__`` while
        later statements read ``kwargs``, ``sample``, ``output`` etc.; those
        reads are unresolved as written. Kept byte-identical — docs only.
        """
        a__ : Union[str, Any] = dict(self.forward_default_kwargs )
        a__ : Union[str, Any] = kwargs.pop("num_inference_steps" , snake_case )
        a__ : Any = self.dummy_sample
        a__ : List[str] = 0.1 * sample
        # Fake residual history so multistep solvers have past values to use.
        a__ : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            a__ : List[Any] = self.get_scheduler_config(**snake_case )
            a__ : Tuple = scheduler_class(**snake_case )
            scheduler.set_timesteps(snake_case )
            # copy over dummy past residuals
            a__ : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(snake_case )
                a__ : Dict = scheduler_class.from_pretrained(snake_case )
                new_scheduler.set_timesteps(snake_case )
                # copy over dummy past residuals
                a__ : Tuple = dummy_past_residuals[: new_scheduler.config.solver_order]
            a__ , a__ : Tuple = sample, sample
            for t in range(snake_case , time_step + scheduler.config.solver_order + 1 ):
                a__ : str = scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample
                a__ : List[str] = new_scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample
                assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def _snake_case ( self ) -> Any:
        """Intentional no-op — presumably overrides an inherited common test
        that does not apply to this scheduler (TODO confirm against the base
        SchedulerCommonTest)."""
        pass
def _snake_case ( self , snake_case=0 , **snake_case ) -> Optional[int]:
"""simple docstring"""
a__ : Dict = dict(self.forward_default_kwargs )
a__ : Tuple = kwargs.pop("num_inference_steps" , snake_case )
a__ : Any = self.dummy_sample
a__ : Optional[int] = 0.1 * sample
a__ : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
a__ : Union[str, Any] = self.get_scheduler_config()
a__ : str = scheduler_class(**snake_case )
scheduler.set_timesteps(snake_case )
# copy over dummy past residuals (must be after setting timesteps)
a__ : Any = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case )
a__ : List[Any] = scheduler_class.from_pretrained(snake_case )
# copy over dummy past residuals
new_scheduler.set_timesteps(snake_case )
# copy over dummy past residual (must be after setting timesteps)
a__ : List[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
a__ : Union[str, Any] = scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample
a__ : Tuple = new_scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _snake_case ( self , snake_case=None , **snake_case ) -> str:
"""simple docstring"""
if scheduler is None:
a__ : int = self.scheduler_classes[0]
a__ : Any = self.get_scheduler_config(**snake_case )
a__ : Union[str, Any] = scheduler_class(**snake_case )
a__ : int = self.scheduler_classes[0]
a__ : Tuple = self.get_scheduler_config(**snake_case )
a__ : Any = scheduler_class(**snake_case )
a__ : Any = 10
a__ : Optional[int] = self.dummy_model()
a__ : List[Any] = self.dummy_sample_deter
scheduler.set_timesteps(snake_case )
for i, t in enumerate(scheduler.timesteps ):
a__ : Optional[int] = model(snake_case , snake_case )
a__ : str = scheduler.step(snake_case , snake_case , snake_case ).prev_sample
return sample
def _snake_case ( self ) -> Optional[Any]:
"""simple docstring"""
a__ : Optional[int] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
a__ : List[Any] = 50
a__ : int = self.dummy_model()
a__ : str = self.dummy_sample_deter
scheduler.set_timesteps(snake_case )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
a__ : List[Any] = model(snake_case , snake_case )
a__ : Tuple = scheduler.step(snake_case , snake_case , snake_case ).prev_sample
a__ : str = torch.mean(torch.abs(snake_case ) )
assert abs(result_mean.item() - 0.2_574 ) < 1E-3
def _snake_case ( self ) -> List[str]:
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=snake_case )
def _snake_case ( self ) -> Any:
"""simple docstring"""
a__ : Tuple = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
a__ : Any = self.full_loop(scheduler=snake_case )
a__ : Dict = torch.mean(torch.abs(snake_case ) )
assert abs(result_mean.item() - 0.2_791 ) < 1E-3
a__ : Union[str, Any] = DEISMultistepScheduler.from_config(scheduler.config )
a__ : Any = DPMSolverMultistepScheduler.from_config(scheduler.config )
a__ : Any = UniPCMultistepScheduler.from_config(scheduler.config )
a__ : Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config )
a__ : Union[str, Any] = self.full_loop(scheduler=snake_case )
a__ : Tuple = torch.mean(torch.abs(snake_case ) )
assert abs(result_mean.item() - 0.2_791 ) < 1E-3
def _snake_case ( self ) -> Optional[int]:
"""simple docstring"""
self.check_over_configs(thresholding=snake_case )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=snake_case , prediction_type=snake_case , sample_max_value=snake_case , algorithm_type="dpmsolver++" , solver_order=snake_case , solver_type=snake_case , )
def _snake_case ( self ) -> Union[str, Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case )
def _snake_case ( self ) -> Optional[int]:
"""simple docstring"""
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=snake_case , solver_type=snake_case , prediction_type=snake_case , algorithm_type=snake_case , )
a__ : List[str] = self.full_loop(
solver_order=snake_case , solver_type=snake_case , prediction_type=snake_case , algorithm_type=snake_case , )
assert not torch.isnan(snake_case ).any(), "Samples have nan numbers"
def _snake_case ( self ) -> str:
"""simple docstring"""
self.check_over_configs(lower_order_final=snake_case )
self.check_over_configs(lower_order_final=snake_case )
def _snake_case ( self ) -> Any:
"""simple docstring"""
self.check_over_configs(lambda_min_clipped=-float("inf" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def _snake_case ( self ) -> List[str]:
"""simple docstring"""
self.check_over_configs(variance_type=snake_case )
self.check_over_configs(variance_type="learned_range" )
def _snake_case ( self ) -> Any:
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=snake_case , time_step=0 )
def _snake_case ( self ) -> Optional[Any]:
"""simple docstring"""
a__ : int = self.full_loop()
a__ : List[Any] = torch.mean(torch.abs(snake_case ) )
assert abs(result_mean.item() - 0.2_791 ) < 1E-3
def _snake_case ( self ) -> List[Any]:
"""simple docstring"""
a__ : List[Any] = self.full_loop(use_karras_sigmas=snake_case )
a__ : str = torch.mean(torch.abs(snake_case ) )
assert abs(result_mean.item() - 0.2_248 ) < 1E-3
def _snake_case ( self ) -> Optional[Any]:
"""simple docstring"""
a__ : Union[str, Any] = self.full_loop(prediction_type="v_prediction" )
a__ : Tuple = torch.mean(torch.abs(snake_case ) )
assert abs(result_mean.item() - 0.1_453 ) < 1E-3
def _snake_case ( self ) -> Tuple:
"""simple docstring"""
a__ : int = self.full_loop(prediction_type="v_prediction" , use_karras_sigmas=snake_case )
a__ : List[str] = torch.mean(torch.abs(snake_case ) )
assert abs(result_mean.item() - 0.0_649 ) < 1E-3
def _snake_case ( self ) -> Union[str, Any]:
"""simple docstring"""
a__ : str = self.scheduler_classes[0]
a__ : int = self.get_scheduler_config(thresholding=snake_case , dynamic_thresholding_ratio=0 )
a__ : List[Any] = scheduler_class(**snake_case )
a__ : str = 10
a__ : List[str] = self.dummy_model()
a__ : List[str] = self.dummy_sample_deter.half()
scheduler.set_timesteps(snake_case )
for i, t in enumerate(scheduler.timesteps ):
a__ : Union[str, Any] = model(snake_case , snake_case )
a__ : str = scheduler.step(snake_case , snake_case , snake_case ).prev_sample
assert sample.dtype == torch.floataa
| 629 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Names exported by the VAN configuration module.
SCREAMING_SNAKE_CASE__ : List[str] = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): the canonical lazy-init pattern adds this list to the
    # import-structure dict under a "modeling_van" key; here it is bound to
    # a module-level name instead — verify against the upstream __init__.py.
    SCREAMING_SNAKE_CASE__ : Optional[int] = [
        """VAN_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """VanForImageClassification""",
        """VanModel""",
        """VanPreTrainedModel""",
    ]
if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )
else:
    import sys
    # NOTE(review): upstream assigns the lazy module to sys.modules[__name__],
    # and ``_import_structure`` is never defined in this file — confirm
    # against the upstream __init__.py before use.
    SCREAMING_SNAKE_CASE__ : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 629 | 1 |
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
# NOTE(review): the class below parameterizes on ``Generic[T]`` but the
# TypeVar is bound here to a different module-level name, so ``T`` is
# undefined at class-creation time — confirm the intended name upstream.
SCREAMING_SNAKE_CASE__ : List[str] = TypeVar("""T""")
class __lowerCAmelCase ( Generic[T] ):
def __init__( self , snake_case = True ) -> None:
"""simple docstring"""
a__ : dict[T, list[T]] = {} # dictionary of lists
a__ : Union[str, Any] = directed
def _snake_case ( self , snake_case , snake_case ) -> GraphAdjacencyList[T]:
"""simple docstring"""
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(snake_case )
self.adj_list[destination_vertex].append(snake_case )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(snake_case )
a__ : Any = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(snake_case )
a__ : Union[str, Any] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
a__ : Optional[int] = [destination_vertex]
a__ : Any = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(snake_case )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(snake_case )
a__ : Optional[int] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
a__ : Any = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
a__ : Tuple = [destination_vertex]
a__ : str = []
return self
def __repr__( self ) -> str:
"""simple docstring"""
return pformat(self.adj_list )
| 629 |
from PIL import Image
def _A ( lowerCamelCase , lowerCamelCase ):
def brightness(lowerCamelCase ) -> float:
return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError("level must be between -255.0 (black) and 255.0 (white)" )
return img.point(lowerCamelCase )
if __name__ == "__main__":
    # Load image
    # NOTE(review): neither ``change_brightness`` nor ``brigt_img`` is defined
    # in this module (the function above is bound to an obfuscated name and
    # the result is assigned to a different module-level name) — confirm
    # against the upstream script.
    with Image.open("""image_data/lena.jpg""") as img:
        # Change brightness to 100
        SCREAMING_SNAKE_CASE__ : List[str] = change_brightness(img, 1_0_0)
        brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
| 629 | 1 |
class __lowerCAmelCase :
def __init__( self ) -> None:
"""simple docstring"""
a__ : dict[str, TrieNode] = {} # Mapping from char to TrieNode
a__ : int = False
def _snake_case ( self , snake_case ) -> None:
"""simple docstring"""
for word in words:
self.insert(snake_case )
def _snake_case ( self , snake_case ) -> None:
"""simple docstring"""
a__ : Tuple = self
for char in word:
if char not in curr.nodes:
a__ : Tuple = TrieNode()
a__ : str = curr.nodes[char]
a__ : str = True
def _snake_case ( self , snake_case ) -> bool:
"""simple docstring"""
a__ : Dict = self
for char in word:
if char not in curr.nodes:
return False
a__ : Dict = curr.nodes[char]
return curr.is_leaf
def _snake_case ( self , snake_case ) -> None:
"""simple docstring"""
def _delete(snake_case , snake_case , snake_case ) -> bool:
if index == len(snake_case ):
# If word does not exist
if not curr.is_leaf:
return False
a__ : Optional[Any] = False
return len(curr.nodes ) == 0
a__ : Any = word[index]
a__ : List[str] = curr.nodes.get(snake_case )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
a__ : Optional[Any] = _delete(snake_case , snake_case , index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
_delete(self , snake_case , 0 )
def _A ( lowerCamelCase , lowerCamelCase ):
if node.is_leaf:
print(lowerCamelCase , end=" " )
for key, value in node.nodes.items():
print_words(lowerCamelCase , word + key )
def _A ( ):
    """Smoke-test trie insert/find/delete; returns True on success.

    NOTE(review): ``TrieNode`` is not defined in this module (the trie class
    above is bound to an obfuscated name), and the locals here are read
    under names (``root``, ``words``) that are only ever bound to a
    throwaway — verify against the upstream module before running.
    """
    a__ : str = "banana bananas bandana band apple all beast".split()
    a__ : Any = TrieNode()
    root.insert_many(lowerCamelCase )
    # print_words(root, "")
    assert all(root.find(lowerCamelCase ) for word in words )
    assert root.find("banana" )
    assert not root.find("bandanas" )
    assert not root.find("apps" )
    assert root.find("apple" )
    assert root.find("all" )
    root.delete("all" )
    assert not root.find("all" )
    root.delete("banana" )
    assert not root.find("banana" )
    assert root.find("bananas" )
    return True
def _A ( lowerCamelCase , lowerCamelCase ):
print(str(lowerCamelCase ) , "works!" if passes else "doesn't work :(" )
def _A ( ):
    """Assert the trie smoke-test passes.

    NOTE(review): ``test_trie`` is not defined under that name in this module
    (all functions here were renamed to ``_A``) — confirm upstream.
    """
    assert test_trie()
def _A ( ):
    """Entry point: run the trie smoke-test and print the outcome.

    NOTE(review): ``print_results`` and ``test_trie`` are not defined under
    those names in this module — confirm against the upstream source.
    """
    print_results("Testing trie functionality" , test_trie() )
if __name__ == "__main__":
    # NOTE(review): ``main`` is not defined in this module (the entry point
    # above is bound to an obfuscated name) — confirm upstream.
    main()
| 629 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
SCREAMING_SNAKE_CASE__ : List[str] = {
"""tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
"""tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
"""base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
"""base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
"""small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
"""small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
"""medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
"""medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
"""large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
"""large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def _A ( lowerCamelCase ):
a__ : Optional[int] = ["layers", "blocks"]
for k in ignore_keys:
state_dict.pop(lowerCamelCase , lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = {
"""blocks""": """layers""",
"""mlp.0""": """fc1""",
"""mlp.2""": """fc2""",
"""mlp_ln""": """final_layer_norm""",
""".attn.query""": """.self_attn.q_proj""",
""".attn.key""": """.self_attn.k_proj""",
""".attn.value""": """.self_attn.v_proj""",
""".attn_ln""": """.self_attn_layer_norm""",
""".attn.out""": """.self_attn.out_proj""",
""".cross_attn.query""": """.encoder_attn.q_proj""",
""".cross_attn.key""": """.encoder_attn.k_proj""",
""".cross_attn.value""": """.encoder_attn.v_proj""",
""".cross_attn_ln""": """.encoder_attn_layer_norm""",
""".cross_attn.out""": """.encoder_attn.out_proj""",
"""decoder.ln.""": """decoder.layer_norm.""",
"""encoder.ln.""": """encoder.layer_norm.""",
"""token_embedding""": """embed_tokens""",
"""encoder.positional_embedding""": """encoder.embed_positions.weight""",
"""decoder.positional_embedding""": """decoder.embed_positions.weight""",
"""ln_post""": """layer_norm""",
}
def _A ( lowerCamelCase ):
    """Rename every key of a state dict in place using ``WHISPER_MAPPING``.

    Each mapping substring found in a key is replaced; the entry is then
    re-inserted under the new key. Returns the (mutated) dict.

    Note: the original bound the running key to a throwaway local and then
    read an unbound ``new_key``, and never re-inserted the popped value;
    restored here from the visible intent.
    """
    keys = list(lowerCamelCase.keys() )
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k , v )
        print(F"""{key} -> {new_key}""" )
        lowerCamelCase[new_key] = lowerCamelCase.pop(key )
    return lowerCamelCase
def _A ( lowerCamelCase ):
a__ , a__ : Any = emb.weight.shape
a__ : Optional[Any] = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase )
a__ : Optional[Any] = emb.weight.data
return lin_layer
def _A ( lowerCamelCase , lowerCamelCase ):
    """Download a checkpoint URL into a cache directory and verify its
    SHA-256 (taken from the URL path), returning the raw bytes.

    NOTE(review): both parameters share one name (a syntax error) and the
    body reads names (``url``, ``download_target``, ``expected_shaaaa``,
    ``buffer``, ``model_bytes``) that are only ever bound to a throwaway —
    the original identifiers (root, url, …) appear to have been mangled;
    restore them from the upstream conversion script. ``hashlib.shaaaa``
    is presumably ``hashlib.sha256``.
    """
    os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
    a__ : Optional[Any] = os.path.basename(lowerCamelCase )
    a__ : List[Any] = url.split("/" )[-2]
    a__ : Tuple = os.path.join(lowerCamelCase , lowerCamelCase )
    if os.path.exists(lowerCamelCase ) and not os.path.isfile(lowerCamelCase ):
        raise RuntimeError(F"""{download_target} exists and is not a regular file""" )
    if os.path.isfile(lowerCamelCase ):
        a__ : Any = open(lowerCamelCase , "rb" ).read()
        if hashlib.shaaaa(lowerCamelCase ).hexdigest() == expected_shaaaa:
            return model_bytes
        else:
            warnings.warn(F"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
    with urllib.request.urlopen(lowerCamelCase ) as source, open(lowerCamelCase , "wb" ) as output:
        with tqdm(
            total=int(source.info().get("Content-Length" ) ) , ncols=80 , unit="iB" , unit_scale=lowerCamelCase , unit_divisor=1024 ) as loop:
            while True:
                a__ : Optional[Any] = source.read(8192 )
                if not buffer:
                    break
                output.write(lowerCamelCase )
                loop.update(len(lowerCamelCase ) )
    a__ : Optional[int] = open(lowerCamelCase , "rb" ).read()
    if hashlib.shaaaa(lowerCamelCase ).hexdigest() != expected_shaaaa:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model." )
    return model_bytes
def _A ( lowerCamelCase , lowerCamelCase ):
    """Convert an OpenAI Whisper checkpoint into a transformers
    ``WhisperForConditionalGeneration`` and save it to disk.

    NOTE(review): both parameters share one name (a syntax error) and the
    body reads names (``checkpoint_path``, ``original_checkpoint``,
    ``state_dict``, ``dimensions``, ``missing``, ``tie_embeds``,
    ``proj_out_weights``) that are only ever bound to a throwaway — the
    original identifiers appear to have been mangled; restore from the
    upstream conversion script before use.
    """
    if ".pt" not in checkpoint_path:
        a__ : str = _download(_MODELS[checkpoint_path] )
    else:
        a__ : str = torch.load(lowerCamelCase , map_location="cpu" )
    a__ : Dict = original_checkpoint["dims"]
    a__ : Optional[int] = original_checkpoint["model_state_dict"]
    a__ : Any = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(lowerCamelCase )
    rename_keys(lowerCamelCase )
    a__ : Optional[Any] = True
    a__ : Optional[Any] = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    a__ : Tuple = WhisperConfig(
        vocab_size=dimensions["n_vocab"] , encoder_ffn_dim=lowerCamelCase , decoder_ffn_dim=lowerCamelCase , num_mel_bins=dimensions["n_mels"] , d_model=dimensions["n_audio_state"] , max_target_positions=dimensions["n_text_ctx"] , encoder_layers=dimensions["n_audio_layer"] , encoder_attention_heads=dimensions["n_audio_head"] , decoder_layers=dimensions["n_text_layer"] , decoder_attention_heads=dimensions["n_text_state"] , max_source_positions=dimensions["n_audio_ctx"] , )
    a__ : Optional[Any] = WhisperForConditionalGeneration(lowerCamelCase )
    a__ , a__ : Tuple = model.model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
    # Only positional-embedding weights may legitimately be absent.
    if len(lowerCamelCase ) > 0 and not set(lowerCamelCase ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            F""" but all the following weights are missing {missing}""" )
    if tie_embeds:
        a__ : Any = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        a__ : str = proj_out_weights
    model.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
    # Command-line entry point for the conversion script.
    SCREAMING_SNAKE_CASE__ : Tuple = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("""--checkpoint_path""", type=str, help="""Patht to the downloaded checkpoints""")
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    # NOTE(review): ``parser``/``args``/``convert_openai_whisper_to_tfms`` are
    # not defined under these names in this module (results were bound to
    # obfuscated names and the converter above is called ``_A``) — confirm
    # against the upstream script.
    SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 629 | 1 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
SCREAMING_SNAKE_CASE__ : int = {
"""susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""",
"""susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""",
}
class __lowerCAmelCase ( _UpperCamelCase ):
    """Configuration for Ernie-M models: vocabulary, transformer sizes,
    dropouts and decoder settings.

    Note: in the original every ``__init__`` parameter was bound to the same
    placeholder name (a syntax error); the parameter names below were
    restored from the attribute-assignment order in the body.
    """

    _UpperCamelCase : Union[str, Any] = """ernie_m"""
    # Maps legacy kwarg names onto the canonical config attribute names.
    _UpperCamelCase : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self ,
        vocab_size = 250_002 ,
        hidden_size = 768 ,
        num_hidden_layers = 12 ,
        num_attention_heads = 12 ,
        intermediate_size = 3_072 ,
        hidden_act = "gelu" ,
        hidden_dropout_prob = 0.1 ,
        attention_probs_dropout_prob = 0.1 ,
        max_position_embeddings = 514 ,
        initializer_range = 0.02 ,
        pad_token_id = 1 ,
        layer_norm_eps = 1E-05 ,
        classifier_dropout=None ,
        is_decoder=False ,
        act_dropout=0.0 ,
        **kwargs ,
    ) -> None:
        """Create the config; unknown kwargs are forwarded to the base class."""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
| 629 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : str = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class __lowerCAmelCase ( _UpperCamelCase ):
    """Configuration for Informer time-series forecasting models.

    NOTE(review): every ``__init__`` parameter below is bound to the same
    placeholder name (a syntax error), and the body reads the original
    parameter names (``prediction_length``, ``cardinality``, …) that are
    never bound while assignments go to a throwaway — the identifiers appear
    to have been mechanically mangled; restore from upstream before use.
    """

    _UpperCamelCase : Optional[Any] = """informer"""
    # Generic HF attribute aliases mapped onto the Informer-specific names.
    _UpperCamelCase : Any = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
        """num_hidden_layers""": """encoder_layers""",
    }

    def __init__( self , snake_case = None , snake_case = None , snake_case = "student_t" , snake_case = "nll" , snake_case = 1 , snake_case = None , snake_case = "mean" , snake_case = 0 , snake_case = 0 , snake_case = 0 , snake_case = 0 , snake_case = None , snake_case = None , snake_case = 64 , snake_case = 32 , snake_case = 32 , snake_case = 2 , snake_case = 2 , snake_case = 2 , snake_case = 2 , snake_case = True , snake_case = "gelu" , snake_case = 0.05 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 100 , snake_case = 0.02 , snake_case=True , snake_case = "prob" , snake_case = 5 , snake_case = True , **snake_case , ) -> Union[str, Any]:
        """Create the config; see the class note about mangled identifiers."""
        # Time-series-specific settings.
        a__ : Optional[Any] = prediction_length
        a__ : Optional[int] = context_length or prediction_length
        a__ : Optional[int] = distribution_output
        a__ : str = loss
        a__ : Optional[Any] = input_size
        a__ : int = num_time_features
        a__ : Optional[int] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        a__ : Optional[int] = scaling
        a__ : List[str] = num_dynamic_real_features
        a__ : Optional[int] = num_static_real_features
        a__ : Optional[int] = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(snake_case ) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`" )
            a__ : List[Any] = cardinality
        else:
            a__ : Tuple = [0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(snake_case ) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
            a__ : Tuple = embedding_dimension
        else:
            a__ : int = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        a__ : Optional[Any] = num_parallel_samples
        # Transformer architecture configuration
        a__ : int = input_size * len(self.lags_sequence ) + self._number_of_features
        a__ : Union[str, Any] = d_model
        a__ : Any = encoder_attention_heads
        a__ : Optional[Any] = decoder_attention_heads
        a__ : int = encoder_ffn_dim
        a__ : List[Any] = decoder_ffn_dim
        a__ : List[str] = encoder_layers
        a__ : Any = decoder_layers
        a__ : List[str] = dropout
        a__ : int = attention_dropout
        a__ : List[Any] = activation_dropout
        a__ : Optional[int] = encoder_layerdrop
        a__ : Tuple = decoder_layerdrop
        a__ : Any = activation_function
        a__ : Tuple = init_std
        a__ : Optional[int] = use_cache
        # Informer
        a__ : Union[str, Any] = attention_type
        a__ : List[str] = sampling_factor
        a__ : Optional[int] = distil
        super().__init__(is_encoder_decoder=snake_case , **snake_case )

    @property
    def _snake_case ( self ) -> int:
        """Total width of the per-timestep feature vector fed to the model."""
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 629 | 1 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def _A ( lowerCamelCase , sampling_rate ):
    """Decode an encoded audio byte payload to mono float32 PCM via ffmpeg.

    Args:
        lowerCamelCase: raw encoded audio bytes (any container ffmpeg reads).
        sampling_rate: target sample rate in Hz.

    Returns:
        1-D ``np.float32`` array of PCM samples.

    Raises:
        ValueError: if the ffmpeg binary is missing or no audio was decoded.

    Note: the original def repeated one parameter name (a syntax error) and
    every local was lost to a throwaway name; ``np.floataa`` (nonexistent)
    restored to ``np.float32`` to match the ``f32le`` output format.
    """
    ar = F"""{sampling_rate}"""
    ac = "1"  # downmix to mono
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        with subprocess.Popen(ffmpeg_command , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(lowerCamelCase )
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes , np.float32 )
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile" )
    return audio
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase = "f32le" , ):
    """Stream raw audio from the platform's default microphone via ffmpeg.

    NOTE(review): the first two parameters share one name (a syntax error)
    and the body reads names (``sampling_rate``, ``chunk_length_s``,
    ``format_for_conversion``, ``system``, ``format_``, ``input_``, ``ac``,
    ``ar``, ``size_of_sample``, ``iterator``) that are only ever bound to a
    throwaway — the original parameter/local names appear to have been
    mangled; restore from the upstream audio utilities before use.
    """
    a__ : int = F"""{sampling_rate}"""
    a__ : List[str] = "1"
    # Bytes per sample for the chosen PCM output format.
    if format_for_conversion == "s16le":
        a__ : str = 2
    elif format_for_conversion == "f32le":
        a__ : Optional[int] = 4
    else:
        raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
    # Pick the platform-specific capture backend and device.
    a__ : int = platform.system()
    if system == "Linux":
        a__ : Tuple = "alsa"
        a__ : Any = "default"
    elif system == "Darwin":
        a__ : List[str] = "avfoundation"
        a__ : Any = ":0"
    elif system == "Windows":
        a__ : Any = "dshow"
        a__ : int = "default"
    a__ : Dict = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    # Bytes per emitted chunk = samples-per-chunk * bytes-per-sample.
    a__ : Optional[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    a__ : Optional[int] = _ffmpeg_stream(lowerCamelCase , lowerCamelCase )
    for item in iterator:
        yield item
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = "f32le" , ):
    """Yield overlapping microphone chunks as numpy arrays, skipping chunks
    that fall too far behind real time.

    NOTE(review): all five parameters share one name (a syntax error) and
    the body reads names (``stream_chunk_s``, ``chunk_length_s``,
    ``sampling_rate``, ``stride_length_s``, ``dtype``, ``size_of_sample``,
    ``chunk_len``, ``stride_left``, ``stride_right``, ``audio_time``,
    ``delta``) that are only ever bound to a throwaway — restore the
    original identifiers from the upstream audio utilities before use.
    ``np.intaa``/``np.floataa`` are presumably ``np.int16``/``np.float32``.
    """
    if stream_chunk_s is not None:
        a__ : int = stream_chunk_s
    else:
        a__ : str = chunk_length_s
    a__ : List[Any] = ffmpeg_microphone(lowerCamelCase , lowerCamelCase , format_for_conversion=lowerCamelCase )
    # Map the PCM format onto a numpy dtype and bytes-per-sample.
    if format_for_conversion == "s16le":
        a__ : Union[str, Any] = np.intaa
        a__ : Union[str, Any] = 2
    elif format_for_conversion == "f32le":
        a__ : Optional[int] = np.floataa
        a__ : Any = 4
    else:
        raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
    if stride_length_s is None:
        a__ : str = chunk_length_s / 6
    a__ : List[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    if isinstance(lowerCamelCase , (int, float) ):
        a__ : Dict = [stride_length_s, stride_length_s]
    a__ : List[str] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
    a__ : List[Any] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
    a__ : Dict = datetime.datetime.now()
    a__ : Optional[Any] = datetime.timedelta(seconds=lowerCamelCase )
    for item in chunk_bytes_iter(lowerCamelCase , lowerCamelCase , stride=(stride_left, stride_right) , stream=lowerCamelCase ):
        # Put everything back in numpy scale
        a__ : Dict = np.frombuffer(item["raw"] , dtype=lowerCamelCase )
        a__ : int = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        a__ : int = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = False ):
a__ : int = b""
a__ , a__ : Dict = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
a__ : str = 0
for raw in iterator:
acc += raw
if stream and len(lowerCamelCase ) < chunk_len:
a__ : int = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(lowerCamelCase ) >= chunk_len:
# We are flushing the accumulator
a__ : str = (_stride_left, stride_right)
a__ : List[Any] = {"raw": acc[:chunk_len], "stride": stride}
if stream:
a__ : int = False
yield item
a__ : List[Any] = stride_left
a__ : Tuple = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(lowerCamelCase ) > stride_left:
a__ : int = {"raw": acc, "stride": (_stride_left, 0)}
if stream:
a__ : Any = False
yield item
def _A ( lowerCamelCase , buflen ):
    """Spawn the given ffmpeg command and yield its stdout in chunks.

    Args:
        lowerCamelCase: the ffmpeg argv list to run.
        buflen: maximum number of bytes per yielded chunk.

    Raises:
        ValueError: if the ffmpeg binary cannot be found.

    Note: the original def repeated one parameter name (a syntax error) and
    read unbound ``bufsize``/``raw`` locals; restored here.
    """
    bufsize = 2**24  # 16Mo pipe buffer
    try:
        with subprocess.Popen(lowerCamelCase , stdout=subprocess.PIPE , bufsize=bufsize ) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen )
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
| 629 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
SCREAMING_SNAKE_CASE__ : List[Any] = """sshleifer/bart-tiny-random"""
SCREAMING_SNAKE_CASE__ : Tuple = """patrickvonplaten/t5-tiny-random"""
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
    """Tests for ``create_student_by_copying_alternating_layers``.

    NOTE(review): results are unpacked into the throwaway name ``a__`` and
    the placeholder ``snake_case`` is passed where a model name or count was
    presumably intended, while assertions read an unbound ``student`` —
    verify against the upstream test module before relying on this code.
    """

    @cached_property
    def _snake_case ( self ) -> List[Any]:
        """Teacher config fixture."""
        return AutoConfig.from_pretrained(snake_case )

    def _snake_case ( self ) -> Any:
        """A 1-encoder/1-decoder student reports one hidden layer (t5)."""
        a__ , *a__ : int = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.num_hidden_layers , 1 )

    def _snake_case ( self ) -> str:
        """Student creation succeeds when the decoder depth is unspecified."""
        a__ , *a__ : int = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=snake_case )

    def _snake_case ( self ) -> List[str]:
        """Unspecified decoder depth copies the teacher's full decoder."""
        a__ , *a__ : Any = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=snake_case )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )

    def _snake_case ( self ) -> Optional[int]:
        """Explicit 1/1 depths are honored on both sides."""
        a__ , *a__ : List[str] = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , 1 )

    def _snake_case ( self ) -> int:
        """Omitting both depths is rejected."""
        with self.assertRaises(snake_case ):
            create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=snake_case , d=snake_case )
| 629 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__)
def make_batched(videos):
    """Coerce `videos` into a batch of videos (``list[list[image]]``).

    Accepts a batch of videos, a single video (a list/tuple of frames), or a
    single image, and normalises all three layouts.

    Raises:
        ValueError: if `videos` matches none of the accepted layouts.
    """
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        # Already a batch of videos.
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        # A single video: wrap it into a batch of one.
        return [videos]
    elif is_valid_image(videos ):
        # A single frame: wrap into a one-frame, one-video batch.
        return [[videos]]
    raise ValueError(F"""Could not make batched video from {videos}""" )


# Backward-compatible alias: the previous definition was named `_A` but its
# body read the undefined name `videos`; the class below calls `make_batched`.
_A = make_batched
class __lowerCAmelCase ( _UpperCamelCase ):
    """Video frame processor: optionally resize, center-crop, rescale (with an
    optional offset shift) and normalize every frame of every video, then
    batch the result as ``{"pixel_values": ...}``.

    NOTE(review): this class cannot run as written — every ``def`` below
    declares several parameters that all share the name ``snake_case`` (a
    SyntaxError); bodies read names no parameter carries (``size``,
    ``image``, ``do_resize`` …); all five helper methods share the name
    ``_snake_case`` so only the last survives; and ``__init__`` binds its
    configuration to the throwaway local ``a__`` instead of attributes on
    ``self``. The original names appear destroyed by an automated rewrite.
    """
    # Output keys produced for the model.
    _UpperCamelCase : List[str] = ["""pixel_values"""]
    def __init__( self , snake_case = True , snake_case = None , snake_case = PILImageResampling.BILINEAR , snake_case = True , snake_case = None , snake_case = True , snake_case = 1 / 255 , snake_case = True , snake_case = True , snake_case = None , snake_case = None , **snake_case , ) -> None:
        """Store the preprocessing configuration. Defaults: resize to
        shortest_edge 256, 224x224 center crop, 1/255 rescale, ImageNet
        mean/std for normalization."""
        super().__init__(**snake_case )
        a__ : List[Any] = size if size is not None else {"shortest_edge": 256}
        a__ : List[Any] = get_size_dict(snake_case , default_to_square=snake_case )
        a__ : Tuple = crop_size if crop_size is not None else {"height": 224, "width": 224}
        a__ : int = get_size_dict(snake_case , param_name="crop_size" )
        # NOTE(review): the assignments below were presumably `self.<name> = <name>`.
        a__ : Optional[int] = do_resize
        a__ : str = size
        a__ : Tuple = do_center_crop
        a__ : Dict = crop_size
        a__ : Union[str, Any] = resample
        a__ : Union[str, Any] = do_rescale
        a__ : Union[str, Any] = rescale_factor
        a__ : str = offset
        a__ : Tuple = do_normalize
        a__ : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        a__ : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def _snake_case ( self , snake_case , snake_case , snake_case = PILImageResampling.BILINEAR , snake_case = None , **snake_case , ) -> np.ndarray:
        """Resize one frame; `size` must contain either `shortest_edge` or
        both `height` and `width`."""
        a__ : Tuple = get_size_dict(snake_case , default_to_square=snake_case )
        if "shortest_edge" in size:
            a__ : str = get_resize_output_image_size(snake_case , size["shortest_edge"] , default_to_square=snake_case )
        elif "height" in size and "width" in size:
            a__ : Dict = (size["height"], size["width"])
        else:
            raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
        return resize(snake_case , size=snake_case , resample=snake_case , data_format=snake_case , **snake_case )
    def _snake_case ( self , snake_case , snake_case , snake_case = None , **snake_case , ) -> np.ndarray:
        """Center-crop one frame to `size` (`height`/`width` keys required)."""
        a__ : Union[str, Any] = get_size_dict(snake_case )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
        return center_crop(snake_case , size=(size["height"], size["width"]) , data_format=snake_case , **snake_case )
    def _snake_case ( self , snake_case , snake_case , snake_case = True , snake_case = None , **snake_case , ) -> List[str]:
        """Rescale one frame by `scale`; when `offset` is set, shift the
        pixel values by -scale/2 first."""
        # NOTE(review): `np.floataa` is not a NumPy attribute (presumably
        # `np.float32` originally) — this raises AttributeError as written.
        a__ : Tuple = image.astype(np.floataa )
        if offset:
            a__ : Tuple = image - (scale / 2)
        return rescale(snake_case , scale=snake_case , data_format=snake_case , **snake_case )
    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case = None , **snake_case , ) -> np.ndarray:
        """Normalize one frame with the given mean/std."""
        return normalize(snake_case , mean=snake_case , std=snake_case , data_format=snake_case , **snake_case )
    def _snake_case ( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = ChannelDimension.FIRST , ) -> np.ndarray:
        """Apply the enabled transforms to a single frame and convert it to
        the requested channel-dimension layout."""
        # NOTE(review): precedence here is `(do_resize and size is None) or
        # (resample is None)` — a missing resample raises even when do_resize
        # is False; `do_resize and (size is None or resample is None)` was
        # probably intended.
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True." )
        # All transformations expect numpy arrays.
        a__ : Any = to_numpy_array(snake_case )
        if do_resize:
            a__ : Optional[int] = self.resize(image=snake_case , size=snake_case , resample=snake_case )
        if do_center_crop:
            a__ : Optional[Any] = self.center_crop(snake_case , size=snake_case )
        if do_rescale:
            a__ : Union[str, Any] = self.rescale(image=snake_case , scale=snake_case , offset=snake_case )
        if do_normalize:
            a__ : Optional[int] = self.normalize(image=snake_case , mean=snake_case , std=snake_case )
        a__ : int = to_channel_dimension_format(snake_case , snake_case )
        return image
    def _snake_case ( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = ChannelDimension.FIRST , **snake_case , ) -> PIL.Image.Image:
        """Preprocess a batch of videos: resolve each option against the
        stored defaults, validate the images, run the per-frame pipeline on
        every frame of every video, and wrap the result in a BatchFeature."""
        a__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
        a__ : Union[str, Any] = resample if resample is not None else self.resample
        a__ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
        a__ : str = do_rescale if do_rescale is not None else self.do_rescale
        a__ : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
        a__ : int = offset if offset is not None else self.offset
        a__ : int = do_normalize if do_normalize is not None else self.do_normalize
        a__ : List[str] = image_mean if image_mean is not None else self.image_mean
        a__ : Tuple = image_std if image_std is not None else self.image_std
        a__ : Dict = size if size is not None else self.size
        a__ : Tuple = get_size_dict(snake_case , default_to_square=snake_case )
        a__ : Any = crop_size if crop_size is not None else self.crop_size
        a__ : List[Any] = get_size_dict(snake_case , param_name="crop_size" )
        if not valid_images(snake_case ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        a__ : Dict = make_batched(snake_case )
        a__ : Dict = [
            [
                self._preprocess_image(
                    image=snake_case , do_resize=snake_case , size=snake_case , resample=snake_case , do_center_crop=snake_case , crop_size=snake_case , do_rescale=snake_case , rescale_factor=snake_case , offset=snake_case , do_normalize=snake_case , image_mean=snake_case , image_std=snake_case , data_format=snake_case , )
                for img in video
            ]
            for video in videos
        ]
        a__ : int = {"pixel_values": videos}
        return BatchFeature(data=snake_case , tensor_type=snake_case )
| 629 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
# NOTE(review): all four checkpoint ids bind the same module-level name, so
# only the last ("CompVis/stable-diffusion-v1-4") survives — the original
# file presumably used four distinct constant names.
SCREAMING_SNAKE_CASE__ : Dict = """CompVis/stable-diffusion-v1-1"""
SCREAMING_SNAKE_CASE__ : Dict = """CompVis/stable-diffusion-v1-2"""
SCREAMING_SNAKE_CASE__ : Tuple = """CompVis/stable-diffusion-v1-3"""
SCREAMING_SNAKE_CASE__ : str = """CompVis/stable-diffusion-v1-4"""
class __lowerCAmelCase ( _UpperCamelCase ):
    """Comparison pipeline: runs the same prompt through four Stable Diffusion
    checkpoints and returns the first image produced by each.

    NOTE(review): this class cannot run as written — ``__init__`` declares
    eight parameters all named ``snake_case`` (a SyntaxError); the loaded
    pipelines are bound to the throwaway local ``a__`` although
    ``self.pipea`` is read below; ``register_modules`` repeats the single
    keyword ``pipelinea`` four times (originally four distinct names); and
    the four generation wrappers all delegate to the same ``self.pipea``.
    """
    def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case = True , ) -> Any:
        """Load the four checkpoint pipelines and register them as modules."""
        # NOTE(review): typo — `_init_` is not `__init__`, so the base
        # DiffusionPipeline initializer is never called (AttributeError).
        super()._init_()
        a__ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(snake_case )
        a__ : Optional[Any] = StableDiffusionPipeline.from_pretrained(snake_case )
        a__ : Optional[Any] = StableDiffusionPipeline.from_pretrained(snake_case )
        a__ : int = StableDiffusionPipeline(
            vae=snake_case , text_encoder=snake_case , tokenizer=snake_case , unet=snake_case , scheduler=snake_case , safety_checker=snake_case , feature_extractor=snake_case , requires_safety_checker=snake_case , )
        self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
    @property
    def _snake_case ( self ) -> Dict[str, Any]:
        """Expose the registered (non-private) config entries as a dict."""
        # NOTE(review): `snake_case` is undefined here — presumably `k`.
        return {k: getattr(self , snake_case ) for k in self.config.keys() if not k.startswith("_" )}
    def _snake_case ( self , snake_case = "auto" ) -> Optional[Any]:
        """Enable sliced attention; "auto" halves the attention head dim."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            a__ : List[Any] = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(snake_case )
    def _snake_case ( self ) -> Tuple:
        """Disable attention slicing (delegates with a disabling argument)."""
        self.enable_attention_slicing(snake_case )
    @torch.no_grad()
    def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Optional[int]:
        """Text-to-image via one stored checkpoint (see class NOTE)."""
        return self.pipea(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
    @torch.no_grad()
    def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict:
        """Text-to-image via one stored checkpoint (see class NOTE)."""
        return self.pipea(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
    @torch.no_grad()
    def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Union[str, Any]:
        """Text-to-image via one stored checkpoint (see class NOTE)."""
        return self.pipea(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
    @torch.no_grad()
    def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict:
        """Text-to-image via one stored checkpoint (see class NOTE)."""
        return self.pipea(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
    @torch.no_grad()
    def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict:
        """Run all four checkpoints on the prompt (height/width must be
        multiples of 8) and bundle the first image of each result into a
        single StableDiffusionPipelineOutput."""
        a__ : Any = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(snake_case )
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
        # Get first result from Stable Diffusion Checkpoint v1.1
        a__ : Any = self.textaimg_sda_a(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
        # Get first result from Stable Diffusion Checkpoint v1.2
        a__ : List[Any] = self.textaimg_sda_a(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
        # Get first result from Stable Diffusion Checkpoint v1.3
        a__ : Optional[Any] = self.textaimg_sda_a(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
        # Get first result from Stable Diffusion Checkpoint v1.4
        a__ : Dict = self.textaimg_sda_a(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 629 | 1 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class __lowerCAmelCase :
    """Helper that builds tiny CTRL configs and dummy inputs for model tests.

    NOTE(review): this class cannot run as written — ``__init__`` declares
    many parameters all named ``snake_case`` (a SyntaxError) and binds them
    to the throwaway local ``a__`` instead of ``self.*`` attributes that the
    other methods read (``self.batch_size`` …); every other method shares
    the name ``_snake_case``, so earlier definitions are shadowed; and the
    parenthesized annotated tuple unpack near the end (``(...) : str = ...``)
    is invalid syntax.
    """
    def __init__( self , snake_case , snake_case=14 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=99 , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=37 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=16 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ) -> Dict:
        """Record the tiny-model hyperparameters used by the test suite."""
        a__ : Any = parent
        a__ : Union[str, Any] = batch_size
        a__ : Union[str, Any] = seq_length
        a__ : List[Any] = is_training
        a__ : Tuple = use_token_type_ids
        a__ : Optional[Any] = use_input_mask
        a__ : Optional[Any] = use_labels
        a__ : List[str] = use_mc_token_ids
        a__ : List[Any] = vocab_size
        a__ : Tuple = hidden_size
        a__ : str = num_hidden_layers
        a__ : Optional[int] = num_attention_heads
        a__ : Any = intermediate_size
        a__ : List[Any] = hidden_act
        a__ : List[Any] = hidden_dropout_prob
        a__ : int = attention_probs_dropout_prob
        a__ : Union[str, Any] = max_position_embeddings
        a__ : Union[str, Any] = type_vocab_size
        a__ : List[str] = type_sequence_label_size
        a__ : Any = initializer_range
        a__ : List[Any] = num_labels
        a__ : Optional[Any] = num_choices
        a__ : List[Any] = scope
        a__ : List[str] = self.vocab_size - 1
    def _snake_case ( self ) -> int:
        """Build random input ids, masks and labels plus a config."""
        a__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        a__ : int = None
        if self.use_input_mask:
            a__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
        a__ : int = None
        if self.use_token_type_ids:
            a__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        a__ : List[str] = None
        if self.use_mc_token_ids:
            a__ : List[Any] = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
        a__ : int = None
        a__ : str = None
        a__ : str = None
        if self.use_labels:
            a__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            a__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            a__ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
        a__ : Optional[Any] = self.get_config()
        a__ : List[str] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def _snake_case ( self ) -> Union[str, Any]:
        """Create a tiny CTRLConfig from the stored hyperparameters."""
        return CTRLConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , *snake_case ) -> Dict:
        """Run CTRLModel forward and check output/cache shapes."""
        a__ : int = CTRLModel(config=snake_case )
        model.to(snake_case )
        model.eval()
        model(snake_case , token_type_ids=snake_case , head_mask=snake_case )
        model(snake_case , token_type_ids=snake_case )
        a__ : List[str] = model(snake_case )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , *snake_case ) -> Dict:
        """Run CTRLLMHeadModel with labels and check loss/logits shapes."""
        a__ : Tuple = CTRLLMHeadModel(snake_case )
        model.to(snake_case )
        model.eval()
        a__ : Union[str, Any] = model(snake_case , token_type_ids=snake_case , labels=snake_case )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def _snake_case ( self ) -> List[str]:
        """Produce (config, inputs_dict) for the common model tests."""
        a__ : Any = self.prepare_config_and_inputs()
        # NOTE(review): annotated tuple-unpacking target below is a SyntaxError.
        (
            (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) ,
        ) : str = config_and_inputs
        a__ : Tuple = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
        return config, inputs_dict
    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , *snake_case ) -> int:
        """Run CTRLForSequenceClassification and check logits shape."""
        a__ : int = self.num_labels
        a__ : Dict = CTRLForSequenceClassification(snake_case )
        model.to(snake_case )
        model.eval()
        a__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        a__ : str = model(snake_case , token_type_ids=snake_case , labels=snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class __lowerCAmelCase ( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
    """Common-suite tests for the CTRL model family.

    NOTE(review): every class attribute below binds the same name
    ``_UpperCamelCase``, so only the last (``False``) survives and the model
    lists / pipeline mapping are lost; the methods also share the name
    ``_snake_case`` (only the last survives) and reference the undefined
    names ``CTRLModelTester`` and ``snake_case``.
    """
    _UpperCamelCase : int = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    _UpperCamelCase : int = (CTRLLMHeadModel,) if is_torch_available() else ()
    _UpperCamelCase : List[str] = (
        {
            """feature-extraction""": CTRLModel,
            """text-classification""": CTRLForSequenceClassification,
            """text-generation""": CTRLLMHeadModel,
            """zero-shot""": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _UpperCamelCase : Dict = True
    _UpperCamelCase : str = False
    _UpperCamelCase : int = False
    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Tuple:
        """Skip pipeline cases that cannot run with the CTRL tokenizer."""
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True
        return False
    def _snake_case ( self ) -> int:
        """Set up the model tester and the config tester."""
        a__ : Dict = CTRLModelTester(self )
        a__ : Union[str, Any] = ConfigTester(self , config_class=snake_case , n_embd=37 )
    def _snake_case ( self ) -> Union[str, Any]:
        """Free GPU memory after each test."""
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()
    def _snake_case ( self ) -> int:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    def _snake_case ( self ) -> Optional[int]:
        """Exercise the bare CTRLModel."""
        a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*snake_case )
    def _snake_case ( self ) -> Optional[Any]:
        """Exercise the LM-head variant."""
        a__ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*snake_case )
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def _snake_case ( self ) -> Union[str, Any]:
        """Intentionally skipped (model too large for the common test)."""
        pass
    @slow
    def _snake_case ( self ) -> Union[str, Any]:
        """Smoke-test loading the published checkpoint."""
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a__ : List[Any] = CTRLModel.from_pretrained(snake_case )
            self.assertIsNotNone(snake_case )
    @unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
    def _snake_case ( self ) -> List[Any]:
        """Intentionally skipped (no left-padding support)."""
        pass
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
    """Integration test: greedy generation from the published `ctrl` checkpoint.

    NOTE(review): the two methods share the name ``_snake_case`` (the first,
    the teardown, is shadowed) and several ``snake_case`` references are
    undefined — the device argument was presumably ``torch_device``.
    """
    def _snake_case ( self ) -> List[str]:
        """Free GPU memory after each test."""
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()
    @slow
    def _snake_case ( self ) -> List[Any]:
        """Greedy-decode from a fixed prompt and compare against the known ids."""
        a__ : Optional[int] = CTRLLMHeadModel.from_pretrained("ctrl" )
        model.to(snake_case )
        a__ : int = torch.tensor(
            [[11_859, 0, 1_611, 8]] , dtype=torch.long , device=snake_case ) # Legal the president is
        # Expected continuation token ids for greedy decoding.
        a__ : List[Any] = [
            11_859,
            0,
            1_611,
            8,
            5,
            150,
            26_449,
            2,
            19,
            348,
            469,
            3,
            2_595,
            48,
            20_740,
            246_533,
            246_533,
            19,
            30,
            5,
        ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        a__ : Any = model.generate(snake_case , do_sample=snake_case )
        self.assertListEqual(output_ids[0].tolist() , snake_case )
| 629 |
SCREAMING_SNAKE_CASE__ : float = 9.80665  # standard gravity [m/s^2]


def archimedes_principle(fluid_density, volume, gravity=SCREAMING_SNAKE_CASE__):
    """Return the buoyant force (N) on a body: fluid_density * gravity * volume.

    Args:
        fluid_density: density of the displaced fluid [kg/m^3]; must be > 0.
        volume: displaced volume [m^3]; must be >= 0.
        gravity: gravitational acceleration [m/s^2]; defaults to standard
            gravity and must be > 0.

    Raises:
        ValueError: for a non-positive density/gravity or a negative volume.
    """
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density" )
    if volume < 0:
        raise ValueError("Impossible Object volume" )
    if gravity <= 0:
        raise ValueError("Impossible Gravity" )
    return fluid_density * gravity * volume


# Backward-compatible alias: the previous definition was named `_A` and was a
# SyntaxError (duplicate parameter names) whose default referenced the
# undefined name `g`.
_A = archimedes_principle

if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
| 629 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
# NOTE(review): every assignment below binds the SAME module-level name, so
# only the last one survives — the logger, vocab-file names, pretrained maps
# and size/config tables (referenced by the tokenizer class as
# VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, etc.) are all lost. The
# original file presumably used distinct constant names for each.
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Dict = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__ : str = {
    """vocab_file""": {
        """squeezebert/squeezebert-uncased""": (
            """https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"""
        ),
        """squeezebert/squeezebert-mnli""": """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt""",
        """squeezebert/squeezebert-mnli-headless""": (
            """https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """squeezebert/squeezebert-uncased""": (
            """https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"""
        ),
        """squeezebert/squeezebert-mnli""": (
            """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"""
        ),
        """squeezebert/squeezebert-mnli-headless""": (
            """https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"""
        ),
    },
}
SCREAMING_SNAKE_CASE__ : List[str] = {
    """squeezebert/squeezebert-uncased""": 5_1_2,
    """squeezebert/squeezebert-mnli""": 5_1_2,
    """squeezebert/squeezebert-mnli-headless""": 5_1_2,
}
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
    """squeezebert/squeezebert-uncased""": {"""do_lower_case""": True},
    """squeezebert/squeezebert-mnli""": {"""do_lower_case""": True},
    """squeezebert/squeezebert-mnli-headless""": {"""do_lower_case""": True},
}
class __lowerCAmelCase ( _UpperCamelCase ):
    """Fast (tokenizers-backed) SqueezeBERT tokenizer, BERT-style.

    NOTE(review): this class cannot run as written — the class attributes
    reference VOCAB_FILES_NAMES / PRETRAINED_* names that are undefined at
    module level (the constants above all collapsed to one name);
    ``__init__`` declares many parameters all named ``snake_case`` (a
    SyntaxError) and binds results to the local ``a__`` while later lines
    read ``normalizer_state`` / ``do_lower_case``; the writes that should
    update the normalizer state dict and ``self.do_lower_case`` were lost.
    """
    _UpperCamelCase : Tuple = VOCAB_FILES_NAMES
    _UpperCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
    _UpperCamelCase : Optional[int] = PRETRAINED_INIT_CONFIGURATION
    _UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCamelCase : int = SqueezeBertTokenizer
    def __init__( self , snake_case=None , snake_case=None , snake_case=True , snake_case="[UNK]" , snake_case="[SEP]" , snake_case="[PAD]" , snake_case="[CLS]" , snake_case="[MASK]" , snake_case=True , snake_case=None , **snake_case , ) -> Dict:
        """Initialize the backend tokenizer and re-sync its normalizer if the
        lowercase / strip-accents / chinese-chars options differ."""
        super().__init__(
            snake_case , tokenizer_file=snake_case , do_lower_case=snake_case , unk_token=snake_case , sep_token=snake_case , pad_token=snake_case , cls_token=snake_case , mask_token=snake_case , tokenize_chinese_chars=snake_case , strip_accents=snake_case , **snake_case , )
        a__ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , snake_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , snake_case ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , snake_case ) != tokenize_chinese_chars
        ):
            a__ : Tuple = getattr(snake_case , normalizer_state.pop("type" ) )
            a__ : Dict = do_lower_case
            a__ : int = strip_accents
            a__ : Dict = tokenize_chinese_chars
            a__ : Optional[Any] = normalizer_class(**snake_case )
        a__ : Tuple = do_lower_case
    def _snake_case ( self , snake_case , snake_case=None ) -> int:
        """Build `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]` input ids."""
        a__ : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output
    def _snake_case ( self , snake_case , snake_case = None ) -> List[int]:
        """Token-type ids: 0 for the first sequence (+ specials), 1 for the second."""
        a__ : List[Any] = [self.sep_token_id]
        a__ : Tuple = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
    def _snake_case ( self , snake_case , snake_case = None ) -> Tuple[str]:
        """Save the backend vocabulary files and return their paths."""
        a__ : Optional[int] = self._tokenizer.model.save(snake_case , name=snake_case )
        return tuple(snake_case )
| 629 |
from __future__ import annotations
from random import random
class __lowerCAmelCase :
    """A treap node: BST ordering on `value`, min-heap ordering on the random `prior`."""

    def __init__( self , snake_case = None ) -> Any:
        # The previous version bound value/prior/left/right to a throwaway
        # local instead of `self`, so __repr__/__str__ crashed on missing
        # attributes.
        self.value = snake_case
        self.prior = random()  # heap priority, drawn uniformly from [0, 1)
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__( self ) -> str:
        """Leaf: 'value: prior'; inner node: nested dict of children."""
        from pprint import pformat

        if self.left is None and self.right is None:
            return F"""'{self.value}: {self.prior:.5}'"""
        else:
            return pformat(
                {F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )

    def __str__( self ) -> str:
        """Pre-order rendering of the subtree values, space-separated."""
        value = str(self.value ) + " "
        left = str(self.left or "" )
        right = str(self.right or "" )
        return value + left + right


# Public alias: the rest of this module (e.g. `insert`) refers to the class
# as `Node`.
Node = __lowerCAmelCase
def split(root, value):
    """Split a treap into (nodes with value <= `value`, nodes with value > `value`).

    The previous definition was named `_A`, declared two parameters with the
    same name (a SyntaxError) and returned the undefined names `left`/`right`;
    the recursive call sites throughout this module already use `split`.
    Returns (None, None) for an empty tree or a valueless root.
    """
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            # root and its right subtree stay in the ">" part.
            left, root.left = split(root.left, value)
            return left, root
        else:
            # root and its left subtree stay in the "<=" part.
            root.right, right = split(root.right, value)
            return root, right
def merge(left, right):
    """Merge two treaps where every value in `left` precedes every value in `right`.

    The node with the smaller `prior` becomes the root (min-heap property).
    The previous definition was named `_A` and dropped the `left.right` /
    `right.left` assignment targets, discarding the recursive merge results.
    """
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        # `left` wins the root: its right spine absorbs `right`.
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right
def insert(root, value):
    """Insert `value` into the treap rooted at `root`; return the new root.

    Splits the tree around `value`, then merges left part + new node + right
    part. The previous definition was named `_A` although `interact_treap`
    calls it as `insert`, and it referenced the undefined name `Node`.
    """
    node = __lowerCAmelCase(value)  # i.e. a new treap Node
    left, right = split(root, value)
    return merge(merge(left, node), right)
def erase(root, value):
    """Delete every node whose value equals `value`; return the new root.

    The previous definition was named `_A` although `interact_treap` calls
    it as `erase`, and its unpacking targets were collapsed to one name.
    """
    left, right = split(root, value - 1)  # left: values <= value - 1
    _, right = split(right, value)        # drop the nodes equal to `value`
    return merge(left, right)
def inorder(root):
    """Print the treap's values in sorted (in-order) sequence, comma-separated.

    The previous definition was named `_A` while its own recursive calls
    (and the rest of the module) use `inorder`.
    """
    if not root:  # None
        return
    else:
        inorder(root.left )
        print(root.value , end="," )
        inorder(root.right )
def interact_treap(root, args):
    """Apply whitespace-separated commands to the treap and return the new root.

    Commands: ``+N`` inserts N, ``-N`` erases every N; anything else prints
    "Unknown command" and is skipped. The previous definition was named `_A`
    although `main` calls it as `interact_treap`.
    """
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:] ) )
        elif arg[0] == "-":
            root = erase(root, int(arg[1:] ) )
        else:
            print("Unknown command" )
    return root
def main():
    """Interactive loop: read command lines, mutate the treap, print it.

    Terminates when the user enters 'q'. The previous definition was named
    `_A` although the `__main__` guard below calls `main()`.
    """
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. " )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good by!" )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 629 | 1 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowerCAmelCase :
    """Test helper that builds tiny ViTMAE configs, inputs and models.

    NOTE(review): every ``__init__`` parameter below is named ``snake_case``
    (duplicate parameter names are a SyntaxError) and the body reads names such
    as ``parent``/``batch_size`` that are never bound -- this block appears to
    be mechanically obfuscated; confirm against the original source.
    """
    def __init__( self , snake_case , snake_case=13 , snake_case=30 , snake_case=2 , snake_case=3 , snake_case=True , snake_case=True , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=37 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=10 , snake_case=0.02 , snake_case=3 , snake_case=0.6 , snake_case=None , ) -> Union[str, Any]:
        """Store the tester hyper-parameters and derive the expected sequence length."""
        a__ : Any = parent
        a__ : Any = batch_size
        a__ : Dict = image_size
        a__ : List[Any] = patch_size
        a__ : str = num_channels
        a__ : Optional[Any] = is_training
        a__ : Dict = use_labels
        a__ : List[Any] = hidden_size
        a__ : List[str] = num_hidden_layers
        a__ : int = num_attention_heads
        a__ : Union[str, Any] = intermediate_size
        a__ : Any = hidden_act
        a__ : Dict = hidden_dropout_prob
        a__ : str = attention_probs_dropout_prob
        a__ : List[str] = type_sequence_label_size
        a__ : Optional[int] = initializer_range
        a__ : int = mask_ratio
        a__ : List[Any] = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        a__ : str = (image_size // patch_size) ** 2
        a__ : Union[str, Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
    def _snake_case ( self ) -> int:
        """Create random pixel values (and optional labels) plus a config."""
        a__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        a__ : Tuple = None
        if self.use_labels:
            a__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        a__ : List[Any] = self.get_config()
        return config, pixel_values, labels
    def _snake_case ( self ) -> Dict:
        """Build a ViTMAEConfig from the stored hyper-parameters."""
        return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
    def _snake_case ( self , snake_case , snake_case , snake_case ) -> Tuple:
        """Run the bare ViTMAEModel and check the last_hidden_state shape."""
        a__ : List[Any] = ViTMAEModel(config=snake_case )
        model.to(snake_case )
        model.eval()
        a__ : List[Any] = model(snake_case )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def _snake_case ( self , snake_case , snake_case , snake_case ) -> Any:
        """Run ViTMAEForPreTraining and check logits shapes, incl. greyscale input."""
        a__ : int = ViTMAEForPreTraining(snake_case )
        model.to(snake_case )
        model.eval()
        a__ : Any = model(snake_case )
        a__ : Any = (self.image_size // self.patch_size) ** 2
        a__ : List[str] = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        a__ : Tuple = 1
        a__ : Optional[Any] = ViTMAEForPreTraining(snake_case )
        model.to(snake_case )
        model.eval()
        a__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        a__ : Tuple = model(snake_case )
        a__ : str = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    def _snake_case ( self ) -> Optional[Any]:
        """Return (config, inputs_dict) as expected by the common test mixins."""
        a__ : List[str] = self.prepare_config_and_inputs()
        a__ , a__ , a__ : int = config_and_inputs
        a__ : Optional[Any] = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( _UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
    """Common model-test suite for ViTMAE (model + pre-training head).

    NOTE(review): several methods below use tuple-annotated assignments
    (``a__ , a__ : str = ...``), which is invalid Python syntax -- this block
    appears mechanically obfuscated; left byte-identical apart from docs.
    """
    _UpperCamelCase : List[Any] = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    _UpperCamelCase : Optional[int] = {"""feature-extraction""": ViTMAEModel} if is_torch_available() else {}
    _UpperCamelCase : Dict = False
    _UpperCamelCase : str = False
    _UpperCamelCase : str = False
    _UpperCamelCase : Union[str, Any] = False
    def _snake_case ( self ) -> Tuple:
        """Set up the model tester and the config tester."""
        a__ : Optional[Any] = ViTMAEModelTester(self )
        a__ : Any = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 )
    def _snake_case ( self ) -> List[Any]:
        """Exercise the shared config tests."""
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViTMAE does not use inputs_embeds" )
    def _snake_case ( self ) -> List[Any]:
        """Skipped: ViTMAE has no inputs_embeds pathway."""
        pass
    def _snake_case ( self ) -> Dict:
        """Check input embeddings are an nn.Module and output embeddings are Linear/None."""
        a__ , a__ : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a__ : str = model_class(snake_case )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            a__ : List[Any] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) )
    def _snake_case ( self ) -> Dict:
        """Check the forward signature starts with ``pixel_values``."""
        a__ , a__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a__ : Dict = model_class(snake_case )
            a__ : Tuple = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            a__ : Tuple = [*signature.parameters.keys()]
            a__ : Tuple = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , snake_case )
    def _snake_case ( self ) -> int:
        """Run the base-model shape check."""
        a__ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case )
    def _snake_case ( self ) -> Optional[int]:
        """Run the pre-training head shape check."""
        a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*snake_case )
    def _snake_case ( self , snake_case , snake_case , snake_case ) -> Tuple:
        """PT/TF equivalence: inject a fixed noise mask so both see the same inputs."""
        np.random.seed(2 )
        a__ : Optional[int] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
        a__ : Any = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        a__ : List[Any] = torch.from_numpy(snake_case )
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        a__ : Any = pt_noise
        super().check_pt_tf_models(snake_case , snake_case , snake_case )
    def _snake_case ( self ) -> Optional[Any]:
        """save/load round-trip with a re-seeded random mask; outputs must match."""
        a__ , a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a__ : Any = model_class(snake_case )
            model.to(snake_case )
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2 )
            with torch.no_grad():
                a__ : List[str] = model(**self._prepare_for_class(snake_case , snake_case ) )
            a__ : Union[str, Any] = outputs[0].cpu().numpy()
            a__ : List[Any] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(snake_case )
                a__ : Union[str, Any] = model_class.from_pretrained(snake_case )
                model.to(snake_case )
                # make random mask reproducible
                torch.manual_seed(2 )
                with torch.no_grad():
                    a__ : int = model(**self._prepare_for_class(snake_case , snake_case ) )
                # Make sure we don't have nans
                a__ : str = after_outputs[0].cpu().numpy()
                a__ : str = 0
                a__ : Optional[int] = np.amax(np.abs(out_a - out_a ) )
                self.assertLessEqual(snake_case , 1E-5 )
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
    def _snake_case ( self ) -> Tuple:
        """Skipped: forward pass is stochastic (random mask)."""
        pass
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
    def _snake_case ( self ) -> Dict:
        """Skipped: forward pass is stochastic (random mask)."""
        pass
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
    def _snake_case ( self ) -> Optional[Any]:
        """Skipped: forward pass is stochastic (random mask)."""
        pass
    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
    def _snake_case ( self ) -> List[Any]:
        """Skipped: forward pass is stochastic (random mask)."""
        pass
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def _snake_case ( self ) -> Any:
        """Skipped pending a smaller common-test model."""
        pass
    @slow
    def _snake_case ( self ) -> Dict:
        """Slow test: load a pretrained checkpoint from the hub."""
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a__ : Union[str, Any] = ViTMAEModel.from_pretrained(snake_case )
            self.assertIsNotNone(snake_case )
def _A ( ):
    """Load and return the COCO sample image used by the integration tests."""
    # BUG FIX: the obfuscated original assigned the opened image to a throwaway
    # name but returned the undefined name ``image``; bind and return the same
    # variable.
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
    """Slow integration test: run a pretrained ViTMAE on a real image and
    compare a logits slice against reference values."""
    @cached_property
    def _snake_case ( self ) -> List[str]:
        """Return the default ViT image processor (or None without vision deps)."""
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
    @slow
    def _snake_case ( self ) -> Optional[Any]:
        """Forward a COCO image with fixed noise and verify logits shape + slice."""
        np.random.seed(2 )
        a__ : int = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(snake_case )
        a__ : int = self.default_image_processor
        a__ : Optional[Any] = prepare_img()
        a__ : Optional[int] = image_processor(images=snake_case , return_tensors="pt" ).to(snake_case )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        a__ : Optional[int] = ViTMAEConfig()
        a__ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        a__ : Any = np.random.uniform(size=(1, num_patches) )
        # forward pass
        with torch.no_grad():
            a__ : Tuple = model(**snake_case , noise=torch.from_numpy(snake_case ).to(device=snake_case ) )
        # verify the logits
        a__ : List[Any] = torch.Size((1, 196, 768) )
        self.assertEqual(outputs.logits.shape , snake_case )
        a__ : Tuple = torch.tensor(
            [[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(snake_case ) , atol=1E-4 ) )
| 629 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __lowerCAmelCase ( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
    """Fast pipeline tests for StableUnCLIPPipeline built from tiny components.

    NOTE(review): the component-builder below reads names (``embedder_hidden_size``,
    ``embedder_projection_dim``) whose bindings were destroyed by obfuscation --
    confirm against the original source; code left byte-identical.
    """
    _UpperCamelCase : Optional[int] = StableUnCLIPPipeline
    _UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_PARAMS
    _UpperCamelCase : str = TEXT_TO_IMAGE_BATCH_PARAMS
    _UpperCamelCase : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
    _UpperCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    _UpperCamelCase : Any = False
    def _snake_case ( self ) -> List[str]:
        """Build the full set of tiny prior/denoising components for the pipeline."""
        a__ : Any = 32
        a__ : int = embedder_hidden_size
        # prior components
        torch.manual_seed(0 )
        a__ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        torch.manual_seed(0 )
        a__ : Optional[Any] = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case , projection_dim=snake_case , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
        torch.manual_seed(0 )
        a__ : int = PriorTransformer(
            num_attention_heads=2 , attention_head_dim=12 , embedding_dim=snake_case , num_layers=1 , )
        torch.manual_seed(0 )
        a__ : str = DDPMScheduler(
            variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_000 , clip_sample=snake_case , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
        # regular denoising components
        torch.manual_seed(0 )
        a__ : Any = StableUnCLIPImageNormalizer(embedding_dim=snake_case )
        a__ : int = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
        torch.manual_seed(0 )
        a__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        torch.manual_seed(0 )
        a__ : Union[str, Any] = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
        torch.manual_seed(0 )
        a__ : Any = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=snake_case , layers_per_block=1 , upcast_attention=snake_case , use_linear_projection=snake_case , )
        torch.manual_seed(0 )
        a__ : Tuple = DDIMScheduler(
            beta_schedule="scaled_linear" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=snake_case , steps_offset=1 , )
        torch.manual_seed(0 )
        a__ : Optional[int] = AutoencoderKL()
        a__ : Any = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
        return components
    def _snake_case ( self , snake_case , snake_case=0 ) -> Dict:
        """Build deterministic pipeline inputs (seeded generator + prompt)."""
        if str(snake_case ).startswith("mps" ):
            a__ : Union[str, Any] = torch.manual_seed(snake_case )
        else:
            a__ : List[str] = torch.Generator(device=snake_case ).manual_seed(snake_case )
        a__ : Any = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def _snake_case ( self ) -> List[str]:
        """Attention slicing should not change outputs (strict check on CPU only)."""
        a__ : Dict = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=snake_case )
    def _snake_case ( self ) -> int:
        """Batched vs single inference should match (strict check on cpu/mps)."""
        a__ : int = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=snake_case )
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
    """Slow GPU integration tests for the full StableUnCLIP pipeline."""
    def _snake_case ( self ) -> List[str]:
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def _snake_case ( self ) -> Tuple:
        """Generate an image from a prompt and compare to a stored reference."""
        a__ : int = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
        a__ : int = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
        pipe.to(snake_case )
        pipe.set_progress_bar_config(disable=snake_case )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        a__ : Dict = torch.Generator(device="cpu" ).manual_seed(0 )
        a__ : Dict = pipe("anime turle" , generator=snake_case , output_type="np" )
        a__ : List[str] = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(snake_case , snake_case )
    def _snake_case ( self ) -> Tuple:
        """Check peak GPU memory stays under 7 GB with offloading enabled."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        a__ : str = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
        a__ : Union[str, Any] = pipe.to(snake_case )
        pipe.set_progress_bar_config(disable=snake_case )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        a__ : Union[str, Any] = pipe(
            "anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
        a__ : List[Any] = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 629 | 1 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class __lowerCAmelCase ( _UpperCamelCase ):
    """Read-only fsspec filesystem exposing a single compressed file as one
    uncompressed entry.  Subclasses set protocol/compression/extension."""
    _UpperCamelCase : Optional[int] = ""
    _UpperCamelCase : str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    _UpperCamelCase : str = None  # compression type in fsspec. ex: "gzip"
    _UpperCamelCase : str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
    def __init__( self , snake_case = "" , snake_case = None , snake_case = None , **snake_case ) -> List[str]:
        """Open the target URL with fsspec and derive the uncompressed name.

        NOTE(review): the body reads ``target_options`` although every
        parameter is named ``snake_case`` (obfuscation damage) -- confirm the
        intended parameter bindings against the original source.
        """
        super().__init__(self , **snake_case )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        a__ : Any = fsspec.open(
            snake_case , mode="rb" , protocol=snake_case , compression=self.compression , client_kwargs={
                "requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True, # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs" , {} ), # To avoid issues if it was already passed.
            } , **(target_options or {}) , )
        a__ : List[Any] = os.path.basename(self.file.path.split("::" )[0] )
        a__ : List[Any] = (
            self.compressed_name[: self.compressed_name.rindex("." )]
            if "." in self.compressed_name
            else self.compressed_name
        )
        a__ : List[Any] = None
    @classmethod
    def _snake_case ( cls , snake_case ) -> Optional[Any]:
        """Strip the protocol prefix and any leading slash from a path."""
        return super()._strip_protocol(snake_case ).lstrip("/" )
    def _snake_case ( self ) -> Optional[int]:
        """Lazily populate the one-entry directory cache."""
        if self.dir_cache is None:
            # NOTE(review): the cache entry built here references ``f`` which is
            # never bound under that name (obfuscation damage) -- confirm.
            a__ : str = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name}
            a__ : Dict = {f["name"]: f}
    def _snake_case ( self , snake_case ) -> Optional[Any]:
        """Return the full decompressed contents of the wrapped file."""
        return self.file.open().read()
    def _snake_case ( self , snake_case , snake_case = "rb" , snake_case=None , snake_case=True , snake_case=None , **snake_case , ) -> Dict:
        """Open the (single) file for binary reading; any other mode raises."""
        a__ : Optional[int] = self._strip_protocol(snake_case )
        if mode != "rb":
            raise ValueError(F"""Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'""" )
        return self.file.open()
class __lowerCAmelCase ( _UpperCamelCase ):
    """Compressed-file filesystem for bz2 archives."""
    _UpperCamelCase : Optional[Any] = """bz2"""
    _UpperCamelCase : Tuple = """bz2"""
    _UpperCamelCase : Dict = """.bz2"""
class __lowerCAmelCase ( _UpperCamelCase ):
    """Compressed-file filesystem for gzip archives."""
    _UpperCamelCase : Dict = """gzip"""
    _UpperCamelCase : int = """gzip"""
    _UpperCamelCase : Any = """.gz"""
class __lowerCAmelCase ( _UpperCamelCase ):
    """Compressed-file filesystem for lz4 archives."""
    _UpperCamelCase : int = """lz4"""
    _UpperCamelCase : Dict = """lz4"""
    _UpperCamelCase : str = """.lz4"""
class __lowerCAmelCase ( _UpperCamelCase ):
    """Compressed-file filesystem for xz archives."""
    _UpperCamelCase : int = """xz"""
    _UpperCamelCase : Tuple = """xz"""
    _UpperCamelCase : Optional[Any] = """.xz"""
class __lowerCAmelCase ( _UpperCamelCase ):
    """Compressed-file filesystem for zstd archives, with a workaround for the
    read-only ``close`` attribute of zstd's decompression reader."""
    _UpperCamelCase : str = """zstd"""
    _UpperCamelCase : int = """zstd"""
    _UpperCamelCase : Optional[Any] = """.zst"""
    def __init__( self , snake_case , snake_case = "rb" , snake_case = None , snake_case = None , snake_case = DEFAULT_BLOCK_SIZE , **snake_case , ) -> Optional[int]:
        """Open the zstd stream, then patch ``__enter__`` so the returned file
        object is wrapped (see the linked fsspec issue below)."""
        super().__init__(
            fo=snake_case , mode=snake_case , target_protocol=snake_case , target_options=snake_case , block_size=snake_case , **snake_case , )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        # out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        a__ : Optional[Any] = self.file.__enter__
        class __lowerCAmelCase :
            """Proxy that forwards everything to the wrapped file object while
            owning a writable ``close`` attribute."""
            def __init__( self , snake_case ) -> str:
                """Remember the wrapped file object."""
                a__ : List[str] = file_
            def __enter__( self ) -> Any:
                """Enter the wrapped file's context and return the proxy."""
                self._file.__enter__()
                return self
            def __exit__( self , *snake_case , **snake_case ) -> Tuple:
                """Delegate context exit to the wrapped file."""
                self._file.__exit__(*snake_case , **snake_case )
            def __iter__( self ) -> Dict:
                """Iterate over the wrapped file."""
                return iter(self._file )
            def _snake_case ( self ) -> Tuple:
                """Return the next item from the wrapped file."""
                return next(self._file )
            def __getattr__( self , snake_case ) -> Optional[int]:
                """Forward attribute access to the wrapped file."""
                return getattr(self._file , snake_case )
        def fixed_enter(*snake_case , **snake_case ):
            # Wrap the real __enter__ result so callers get the proxy object.
            return WrappedFile(_enter(*snake_case , **snake_case ) )
        a__ : Optional[Any] = fixed_enter
| 629 |
# Lazy-import module for DistilBERT: declares the import structure and only
# materializes submodules on attribute access (via _LazyModule), guarded by
# availability checks for tokenizers / torch / tf / flax.
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)
# NOTE(review): the structure dict is bound to SCREAMING_SNAKE_CASE__ but
# _LazyModule below is called with the undefined name ``_import_structure``
# (obfuscation damage) -- confirm against the original source.
SCREAMING_SNAKE_CASE__ : str = {
    """configuration_distilbert""": [
        """DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """DistilBertConfig""",
        """DistilBertOnnxConfig""",
    ],
    """tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ : Optional[int] = ["""DistilBertTokenizerFast"""]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ : Tuple = [
        """DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """DistilBertForMaskedLM""",
        """DistilBertForMultipleChoice""",
        """DistilBertForQuestionAnswering""",
        """DistilBertForSequenceClassification""",
        """DistilBertForTokenClassification""",
        """DistilBertModel""",
        """DistilBertPreTrainedModel""",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ : List[Any] = [
        """TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFDistilBertForMaskedLM""",
        """TFDistilBertForMultipleChoice""",
        """TFDistilBertForQuestionAnswering""",
        """TFDistilBertForSequenceClassification""",
        """TFDistilBertForTokenClassification""",
        """TFDistilBertMainLayer""",
        """TFDistilBertModel""",
        """TFDistilBertPreTrainedModel""",
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ : Any = [
        """FlaxDistilBertForMaskedLM""",
        """FlaxDistilBertForMultipleChoice""",
        """FlaxDistilBertForQuestionAnswering""",
        """FlaxDistilBertForSequenceClassification""",
        """FlaxDistilBertForTokenClassification""",
        """FlaxDistilBertModel""",
        """FlaxDistilBertPreTrainedModel""",
    ]
# Under static type checking, perform the real imports so analyzers see them.
if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )
else:
    # At runtime, replace this module with a lazy loader.
    import sys
    SCREAMING_SNAKE_CASE__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 629 | 1 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class __lowerCAmelCase :
    """Test helper that builds tiny BertGeneration configs, inputs and models.

    NOTE(review): ``__init__`` declares every parameter as ``snake_case``
    (duplicate names are a SyntaxError) and the body reads names that are
    never bound -- this block appears mechanically obfuscated; left
    byte-identical apart from docs.
    """
    def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=True , snake_case=99 , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=37 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=50 , snake_case=0.02 , snake_case=True , snake_case=None , ) -> List[Any]:
        """Store the tester hyper-parameters."""
        a__ : int = parent
        a__ : Dict = batch_size
        a__ : List[str] = seq_length
        a__ : List[str] = is_training
        a__ : Optional[int] = use_input_mask
        a__ : List[str] = vocab_size
        a__ : Union[str, Any] = hidden_size
        a__ : Tuple = num_hidden_layers
        a__ : Tuple = num_attention_heads
        a__ : str = intermediate_size
        a__ : Union[str, Any] = hidden_act
        a__ : Any = hidden_dropout_prob
        a__ : List[str] = attention_probs_dropout_prob
        a__ : Dict = max_position_embeddings
        a__ : Union[str, Any] = initializer_range
        a__ : Optional[int] = use_labels
        a__ : Tuple = scope
    def _snake_case ( self ) -> Union[str, Any]:
        """Create random input ids, optional mask/labels, and a config."""
        a__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        a__ : Optional[Any] = None
        if self.use_input_mask:
            a__ : str = random_attention_mask([self.batch_size, self.seq_length] )
        if self.use_labels:
            a__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        a__ : str = self.get_config()
        return config, input_ids, input_mask, token_labels
    def _snake_case ( self ) -> str:
        """Build a BertGenerationConfig from the stored hyper-parameters."""
        return BertGenerationConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=snake_case , initializer_range=self.initializer_range , )
    def _snake_case ( self ) -> Optional[int]:
        """Extend the base inputs with encoder states/mask for decoder tests."""
        (
            (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) ,
        ) : Tuple = self.prepare_config_and_inputs()
        a__ : int = True
        a__ : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        a__ : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , **snake_case , ) -> List[Any]:
        """Run the encoder with and without a mask; check output shape."""
        a__ : Tuple = BertGenerationEncoder(config=snake_case )
        model.to(snake_case )
        model.eval()
        a__ : List[Any] = model(snake_case , attention_mask=snake_case )
        a__ : Union[str, Any] = model(snake_case )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , **snake_case , ) -> Tuple:
        """Run the encoder in decoder (cross-attention) mode; check output shape."""
        a__ : Optional[int] = True
        a__ : Any = BertGenerationEncoder(config=snake_case )
        model.to(snake_case )
        model.eval()
        a__ : Union[str, Any] = model(
            snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , )
        a__ : Optional[Any] = model(
            snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , **snake_case , ) -> List[Any]:
        """Check past_key_values caching: cached and uncached outputs must match."""
        a__ : Optional[int] = True
        a__ : List[str] = True
        a__ : int = BertGenerationDecoder(config=snake_case ).to(snake_case ).eval()
        # first forward pass
        a__ : int = model(
            snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , use_cache=snake_case , )
        a__ : Tuple = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        a__ : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        a__ : List[str] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        a__ : Dict = torch.cat([input_ids, next_tokens] , dim=-1 )
        a__ : Dict = torch.cat([input_mask, next_mask] , dim=-1 )
        a__ : int = model(
            snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , output_hidden_states=snake_case , )["hidden_states"][0]
        a__ : Dict = model(
            snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , past_key_values=snake_case , output_hidden_states=snake_case , )["hidden_states"][0]
        # select random slice
        a__ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        a__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
        a__ : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1E-3 ) )
    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , *snake_case , ) -> int:
        """Run the causal-LM decoder with labels; check logits shape."""
        a__ : int = BertGenerationDecoder(snake_case )
        model.to(snake_case )
        model.eval()
        a__ : str = model(snake_case , attention_mask=snake_case , labels=snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def _snake_case ( self ) -> Dict:
        """Return (config, inputs_dict) as expected by the common test mixins."""
        a__ , a__ , a__ , a__ : Optional[Any] = self.prepare_config_and_inputs()
        a__ : Tuple = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
    """Common model-test suite for BertGeneration encoder/decoder."""
    _UpperCamelCase : Union[str, Any] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    _UpperCamelCase : Any = (BertGenerationDecoder,) if is_torch_available() else ()
    _UpperCamelCase : List[Any] = (
        {"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )
    def _snake_case ( self ) -> int:
        """Set up the model tester and the config tester."""
        a__ : Tuple = BertGenerationEncoderTester(self )
        a__ : str = ConfigTester(self , config_class=snake_case , hidden_size=37 )
    def _snake_case ( self ) -> Any:
        """Exercise the shared config tests."""
        self.config_tester.run_common_tests()
    def _snake_case ( self ) -> Dict:
        """Run the basic encoder shape check."""
        a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case )
    def _snake_case ( self ) -> List[str]:
        """Run the encoder check with model_type overridden to "bert"."""
        a__ , a__ , a__ , a__ : List[Any] = self.model_tester.prepare_config_and_inputs()
        a__ : Union[str, Any] = "bert"
        self.model_tester.create_and_check_model(snake_case , snake_case , snake_case , snake_case )
    def _snake_case ( self ) -> Union[str, Any]:
        """Run the decoder-mode (cross-attention) check."""
        a__ : Any = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*snake_case )
    def _snake_case ( self ) -> List[str]:
        """Run the past_key_values caching check."""
        a__ : int = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*snake_case )
    def _snake_case ( self ) -> Dict:
        """Run the decoder-mode check with the encoder attention mask removed."""
        (
            (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) ,
        ) : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
        a__ : Tuple = None
        self.model_tester.create_and_check_model_as_decoder(
            snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , )
    def _snake_case ( self ) -> List[str]:
        """Run the causal-LM decoder check."""
        a__ : str = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*snake_case )
    @slow
    def _snake_case ( self ) -> Union[str, Any]:
        """Slow test: load the pretrained BBC encoder checkpoint from the hub."""
        a__ : str = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
        self.assertIsNotNone(snake_case )
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    """Slow integration test pinning the encoder's hidden-state values."""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1_024])
        self.assertEqual(output.shape, expected_shape)
        # Spot-check a 3x3 corner of the hidden states against reference values.
        expected_slice = torch.tensor(
            [[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    """Slow integration test pinning the decoder's LM-head logits."""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        # Vocabulary size of the seq-generation checkpoint.
        expected_shape = torch.Size([1, 8, 50_358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 629 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def _A ( lowerCamelCase ):
# A local function to see if a dot lands in the circle.
def is_in_circle(lowerCamelCase , lowerCamelCase ) -> bool:
a__ : Any = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
a__ : Union[str, Any] = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(lowerCamelCase ) )
# The ratio of the area for circle to square is pi/4.
a__ : Any = proportion * 4
print(F"""The estimated value of pi is {pi_estimate}""" )
print(F"""The numpy value of pi is {pi}""" )
print(F"""The total error is {abs(pi - pi_estimate )}""" )
def area_under_curve_estimator(iterations, function_to_integrate, min_value=0.0, max_value=1.0):
    """Monte-Carlo (mean-value) integration of ``function_to_integrate`` over
    ``[min_value, max_value]``: the average of f at uniform samples, times the
    interval width.

    NOTE(review): renamed from the garbled ``_A`` — the original signature
    repeated one parameter name (SyntaxError) and the call sites in this module
    already use ``area_under_curve_estimator``.
    """
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
def _A(iterations, min_value=0.0, max_value=1.0):
    """Sanity-check ``area_under_curve_estimator`` on y = x over
    ``[min_value, max_value]`` against the closed form (max^2 - min^2) / 2,
    printing estimate, expected value and error.

    (Fix: the original signature repeated one parameter name and the inner
    identity function returned an undefined ``x``.)
    """

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"""Estimating area under y=x where x varies from {min_value} to {max_value}""")
    print(f"""Estimated value is {estimated_value}""")
    print(f"""Expected value is {expected_value}""")
    print(f"""Total error is {abs(estimated_value - expected_value)}""")
    print("******************")
def _A(iterations):
    """Demonstrate pi estimation by integrating sqrt(4 - x^2) over [0, 2]
    (a quarter circle of radius 2, whose area equals pi).

    (Fix: the inner integrand read an undefined ``x`` and the estimator was
    called with the iteration count in place of the function argument.)
    """

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"""Estimated value is {estimated_value}""")
    print(f"""Expected value is {pi}""")
    print(f"""Total error is {abs(estimated_value - pi)}""")
    print("******************")
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 629 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__)
# Canonical checkpoint name -> hosted config URL.
# NOTE(review): this re-binds the same obfuscated name as the logger above,
# shadowing it — presumably these were two distinct constants originally.
SCREAMING_SNAKE_CASE__ : Dict = {
    """google/switch-base-8""": """https://huggingface.co/google/switch-base-8/blob/main/config.json""",
}
class __lowerCAmelCase(PretrainedConfig):
    """Configuration class for SwitchTransformers (sparse MoE T5-style) models.

    NOTE(review): the attribute assignments below had been garbled into writes
    to a throwaway local, so instances never stored any of their settings; they
    are restored to ``self.<name> = <arg>``. The base class is the
    ``PretrainedConfig`` imported at the top of this file.
    """

    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32_128,
        d_model=768,
        d_kv=64,
        d_ff=2_048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        # `feed_forward_proj` is either "<act>" or "gated-<act>".
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
| 629 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    """Translate one original EfficientFormer state-dict key into the
    HF Transformers naming scheme.

    NOTE(review): renamed from the garbled ``_A`` (the original signature
    repeated one parameter name and the locals ``layer``/``match``/
    ``trimmed_name`` were read but never assigned); nesting of the
    network-branch replacements follows the upstream conversion script.
    """
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, _ = old_name.split(".")
        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        # Keys like "network.<stage>.<block>..." — detect two-digit block ids.
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")
            new_name = "last_stage." + trimmed_name
    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name
    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    """Rename every key of ``checkpoint`` in place via ``rename_key`` and return it.

    NOTE(review): renamed from the garbled ``_A`` to the name the conversion
    entry point already calls; the rewrite of the key (the subscripted store)
    had been collapsed into a throwaway local.
    """
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint
def prepare_img():
    """Download the standard COCO verification image (two cats) used to
    compare processor output against the reference torchvision pipeline.

    NOTE(review): renamed from ``_A`` to ``prepare_img``, the name the
    conversion entry point calls.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    """Convert an original EfficientFormer checkpoint to the HF format,
    verify its logits against reference values, save it, and optionally
    push model + processor to the Hub.

    NOTE(review): the original signature repeated one parameter name and did
    not compile; parameter names restored to match the keyword arguments used
    by the ``__main__`` block of this script.
    """
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    # e.g. "efficientformer_l1_300d.pth" -> "efficientformer_l1"
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    # assumes the config exposes `num_meta3d_blocks` (garbled to "num_metaad_blocks") — confirm
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline — must match the HF processor exactly
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"""
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""")
    processor.save_pretrained(pytorch_dump_path)
    print(f"""Processor successfuly saved at {pytorch_dump_path}""")

    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=f"""Bearnardd/{pytorch_dump_path}""",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"""Bearnardd/{pytorch_dump_path}""",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    # (Fix: the parser and parsed args were assigned to a throwaway name while
    # the code below reads `parser`/`args`, which raised NameError.)
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path",
        default=None,
        type=str,
        required=True,
        help="Path to EfficientFormer pytorch checkpoint.",
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for EfficientFormer model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )
    # Pushing is on by default; `--no-push_to_hub` turns it off.
    parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
| 629 | 1 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def _A(matrix):
    """Return the inverse of a 2x2 or 3x3 matrix (list of rows) as lists of
    floats, using Decimal arithmetic for the intermediate products.

    Raises ValueError for singular matrices or unsupported sizes.
    (Fix: the parameter and the locals ``d``, ``swapped_matrix``,
    ``cofactor_matrix``, ``adjoint_matrix``, ``inverse_matrix`` were read
    below but never assigned in the original.)
    """
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
| 629 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
# NOTE(review): these constants had all been renamed to one shadowing
# placeholder while the tokenizer class below still reads the original
# names; the names are restored so the class attributes resolve.

# Files the slow tokenizer reads / the fast tokenizer serializes.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Canonical checkpoint name -> hosted tokenizer resources.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum model input length (in tokens) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

# Per-checkpoint defaults applied at init time.
PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class __lowerCAmelCase(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) LXMERT tokenizer — a BERT/WordPiece-style tokenizer.

    NOTE(review): every method signature here repeated a parameter name
    (SyntaxError) and the class attributes/methods all shared one shadowing
    placeholder name; they are restored to the framework API names that
    ``PreTrainedTokenizerFast`` dispatches to.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer when its serialized state disagrees
        # with the requested options (lowercasing, accents, CJK handling).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """``[CLS] A [SEP]`` (plus ``B [SEP]`` when a second sequence is given)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Zeros for the first segment (incl. [CLS]/[SEP]), ones for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Serialize the backend tokenizer's model files into ``save_directory``."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 629 | 1 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    """A basic transformer block: self-attention, optional cross-attention,
    and a feed-forward network, each with its own (optionally adaptive) norm.

    NOTE(review): the original did not compile (repeated parameter names) and
    the digit-suffixed attributes (``norm1/2/3``, ``attn1/2``) had been
    collapsed to one name; restored per the reference implementation — confirm
    against the module's callers.
    """

    def __init__(
        self,
        dim,
        num_attention_heads,
        attention_head_dim,
        dropout=0.0,
        cross_attention_dim=None,
        activation_fn="geglu",
        num_embeds_ada_norm=None,
        attention_bias=False,
        only_cross_attention=False,
        double_self_attention=False,
        upcast_attention=False,
        norm_elementwise_affine=True,
        norm_type="layer_norm",
        final_dropout=False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
                f""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."""
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size, dim):
        """Enable chunked feed-forward over axis ``dim`` to save memory."""
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        timestep=None,
        cross_attention_kwargs=None,
        class_labels=None,
    ):
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)
        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."""
                )
            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states
        return hidden_states
class FeedForward(nn.Module):
    """Transformer feed-forward block: activation projection, dropout,
    linear projection out (optionally a trailing dropout).

    NOTE(review): class name restored — this module is instantiated as
    ``FeedForward`` elsewhere in this file; the original ``__init__`` repeated
    a parameter name and did not compile.
    """

    def __init__(self, dim, dim_out=None, mult=4, dropout=0.0, activation_fn="geglu", final_dropout=False):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    """Linear projection followed by GELU (optionally the tanh approximation).

    NOTE(review): class name restored — instantiated as ``GELU`` by
    ``FeedForward`` in this file; duplicate parameter names fixed and the
    garbled ``torch.floataa`` restored to ``torch.float32``.
    """

    def __init__(self, dim_in, dim_out, approximate="none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    """Gated GELU: project to twice the width, then gate one half with the
    GELU of the other.

    NOTE(review): class name restored — instantiated as ``GEGLU`` by
    ``FeedForward`` in this file; duplicate parameter names fixed and the
    garbled ``torch.floataa`` restored to ``torch.float32``.
    """

    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    """Fast sigmoid-based GELU approximation, ``x * sigmoid(1.702 * x)``,
    applied after a linear projection.

    NOTE(review): class name restored — instantiated as ``ApproximateGELU``
    by ``FeedForward`` in this file.
    """

    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    """Layer norm whose scale/shift are predicted from a timestep embedding.

    NOTE(review): class name restored — instantiated as ``AdaLayerNorm`` by
    ``BasicTransformerBlock`` in this file; the self-attribute writes in
    ``forward`` had been collapsed to a throwaway local.
    """

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    """Adaptive layer norm (adaLN-Zero) conditioned on timestep and class label.

    Returns the normalized tensor plus the attention/MLP gates and the MLP
    scale/shift chunks consumed by ``BasicTransformerBlock``.
    NOTE(review): class name restored — instantiated as ``AdaLayerNormZero``
    by ``BasicTransformerBlock`` in this file.
    """

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    """Group norm whose per-channel scale/shift come from an external embedding.

    NOTE(review): duplicate parameter names fixed and self-attribute writes
    restored; class name taken from the reference implementation.
    """

    def __init__(self, embedding_dim, out_dim, num_groups, act_fn=None, eps=1e-5):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)
        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        # Broadcast (batch, 2*C) over the spatial dimensions.
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)
        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
| 629 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__)
# Map: checkpoint name -> URL of its hosted config.json on the Hugging Face Hub.
SCREAMING_SNAKE_CASE__ : Any = {
    """google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
    """google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
    """google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
    """google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class __lowerCAmelCase(_UpperCamelCase):
    """Configuration class for MobileNetV2 models.

    Fixes vs the scrambled original: all fifteen ``__init__`` parameters shared
    the name ``snake_case`` (a SyntaxError) while the body already referenced
    the real names, and every attribute was assigned to a throwaway local
    instead of ``self``.  Parameter names/defaults are taken from the body's
    own references and assignment order.
    """

    _UpperCamelCase : Optional[int] = """mobilenet_v2"""

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ) -> None:
        """Store the model hyper-parameters; raises ValueError for a non-positive depth multiplier."""
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class __lowerCAmelCase(_UpperCamelCase):
    """ONNX export configuration for MobileNetV2.

    Fixes vs the scrambled original: all three properties were named
    ``_snake_case``, so only the last one survived in the class dict.  Restored
    the property names expected by the ``transformers.onnx.OnnxConfig`` API
    (``inputs``, ``outputs``, ``atol_for_validation``).
    """

    _UpperCamelCase : Any = version.parse("""1.11""")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Input tensors of the exported graph (batched pixel values)."""
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """Output tensors; shape depends on the export task."""
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1E-4
| 629 | 1 |
import random
class __lowerCAmelCase:
    """One-time-pad-style cipher.

    Each character with code ``p`` gets a fresh random key ``k`` in [1, 300];
    the ciphertext value is ``(p + k) * k`` and decryption inverts it as
    ``p = (c - k**2) / k``.

    Fixes vs the scrambled original: both staticmethods were named
    ``_snake_case`` (the second silently shadowed the first), while the demo
    code below calls ``Onepad().encrypt`` / ``.decrypt``.
    """

    @staticmethod
    def encrypt(text) -> tuple[list[int], list[int]]:
        """Encrypt *text*; returns parallel (cipher, key) lists."""
        plain = [ord(ch) for ch in text]
        cipher = []
        key = []
        for code in plain:
            k = random.randint(1, 300)
            cipher.append((code + k) * k)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher, key) -> str:
        """Invert :meth:`encrypt` given the ciphertext and its key stream."""
        plain = []
        for i in range(len(cipher)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


# The __main__ demo below (and external callers) refer to the class as `Onepad`.
Onepad = __lowerCAmelCase
if __name__ == "__main__":
    # Demo: round-trip a sample string through the cipher.
    # The scrambled original unpacked into throwaway constants but printed the
    # undefined names `c` and `k`; bind them properly.
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
| 629 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(lowerCamelCase):
    """Recursively collect the ``.shape`` of every tensor in a nested
    dict / list / tuple tree.

    Fixes vs the scrambled original: the function was named ``_A`` (shadowed by
    the later ``_A`` definitions) while its own recursive calls and every
    caller use ``_fetch_dims``; the body also referenced an undefined ``tree``
    and tested ``isinstance(x, x)`` instead of ``isinstance(x, dict)``.
    """
    shapes = []
    if isinstance(lowerCamelCase, dict):
        for v in lowerCamelCase.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(lowerCamelCase, (list, tuple)):
        for t in lowerCamelCase:
            shapes.extend(_fetch_dims(t))
    elif isinstance(lowerCamelCase, torch.Tensor):
        shapes.append(lowerCamelCase.shape)
    else:
        raise ValueError("Not supported")
    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx, dims):
    """Convert a flat (row-major) index into a multi-dimensional index for
    a batch of shape *dims*.

    The scrambled original declared two parameters with the same name (a
    SyntaxError) and was named ``_A`` although callers use ``_flat_idx_to_idx``.
    """
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(start, end, dims, start_edges=None, end_edges=None):
    """Return a minimal list of slice-tuples that exactly cover the inclusive
    multi-dimensional index range [start, end] of a tensor with shape *dims*.

    Restored from the scrambled original, which had five identically-named
    parameters (a SyntaxError) and dozens of undefined identifiers; the
    structure follows the body's surviving references.
    """
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l) -> None:
        # An index only counts as an edge if all outer dimensions are edges too.
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)
    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []
    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break
    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path)
    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())
    return slices
@torch.jit.ignore
def _chunk_slice(t, flat_start, flat_end, no_batch_dims):
    """Slice the flattened batch range [flat_start, flat_end) out of the first
    *no_batch_dims* dimensions of *t* without materializing a flattened copy.

    The scrambled original had four identically-named parameters (SyntaxError)
    and lost the intermediate names; restored from the surviving call sites.
    """
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))
    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)
    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(layer, inputs, chunk_size, no_batch_dims, low_mem=False, _out=None, _add_into_out=False):
    """Run *layer* over *inputs* in chunks along the flattened leading batch dims.

    Args:
        layer: callable invoked as ``layer(**chunked_inputs)``.
        inputs: (possibly nested) dict of tensors sharing ``no_batch_dims`` leading dims.
        chunk_size: number of flattened batch elements per call.
        no_batch_dims: how many leading dims form the batch.
        low_mem: avoid materializing flattened/expanded inputs up front.
        _out: optional pre-allocated output tree to write into.
        _add_into_out: accumulate (+=) into the output instead of overwriting.

    Returns the re-assembled output with the original batch dims restored.
    Restored from the scrambled original, which had seven identically-named
    parameters (a SyntaxError) and replaced the slice-assignment in the dict
    branch with a dead local assignment.
    """
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")
    # Broadcast every input up to the largest batch shape.
    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t):
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    num_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t):
        # Size-1 leading dims are broadcastable and are kept whole.
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(num_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )
        chunks = tensor_tree_map(select_chunk, prepped_inputs)
        # Run the layer on the chunk
        output_chunk = layer(**chunks)
        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)
        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1, d2) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")
        i += chunk_size
    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)
    return out
class __lowerCAmelCase:
    """Caches a viable chunk size for ``chunk_layer``-style callables, re-tuning
    only when the argument shapes/values change.

    Fixes vs the scrambled original: all three methods were named
    ``_snake_case`` (so only the last survived) while the internal calls
    already use ``_compare_arg_caches`` / ``_determine_favorable_chunk_size``;
    ``__init__`` assigned locals instead of ``self.*``; the sort-key lambdas
    took one parameter but referenced another.
    """

    def __init__(self, max_chunk_size=512) -> None:
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn, args, min_chunk_size) -> int:
        """Binary-search the largest power-of-two chunk size (bounded by
        ``max_chunk_size``) for which ``fn(*args, chunk_size=...)`` does not
        raise RuntimeError (e.g. CUDA OOM)."""
        logging.info("Tuning chunk size...")
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2
        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1, ac2) -> bool:
        """Recursively compare two cached argument descriptions for equality."""
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn, args, min_chunk_size) -> int:
        """Return a cached chunk size, re-tuning only when the args changed."""
        consistent = True
        # Describe tensors by shape; everything else is compared by value.
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False
        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size
            )
            self.cached_arg_data = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
| 629 | 1 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a TensorFlow LXMERT checkpoint into a PyTorch state dict.

    Fixes vs the scrambled original: the function was named ``_A`` with three
    identically-named parameters (a SyntaxError), while the ``__main__`` block
    below calls ``convert_tf_checkpoint_to_pytorch``.
    """
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(F"""Building PyTorch model from configuration: {config}""")
    model = LxmertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # CLI entry point.  The scrambled original bound the parser and parsed args
    # to throwaway constants and then referenced undefined `parser` / `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 629 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
class __lowerCAmelCase(_UpperCamelCase):
    """Configuration class for UPerNet semantic-segmentation models.

    Fixes vs the scrambled original: eleven identically-named ``__init__``
    parameters (SyntaxError), ``isinstance(x, x)`` instead of a dict check,
    attributes bound to locals instead of ``self``, and the serializer method
    name scrambled away from ``to_dict``.
    """

    _UpperCamelCase : int = """upernet"""

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],  # mutable default kept for interface compat with upstream
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            # A plain dict config: instantiate the right config class from its model_type.
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        # NOTE(review): `model_type` is expected to come from the PretrainedConfig
        # base class here — confirm against the installed transformers version.
        output["model_type"] = self.__class__.model_type
        return output
| 629 | 1 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
    # Pillow >= 9.1 moved the resampling filters into the PIL.Image.Resampling enum.
    SCREAMING_SNAKE_CASE__ : int = {
        """linear""": PIL.Image.Resampling.BILINEAR,
        """bilinear""": PIL.Image.Resampling.BILINEAR,
        """bicubic""": PIL.Image.Resampling.BICUBIC,
        """lanczos""": PIL.Image.Resampling.LANCZOS,
        """nearest""": PIL.Image.Resampling.NEAREST,
    }
else:
    # Older Pillow exposes the same filters as module-level constants.
    SCREAMING_SNAKE_CASE__ : Dict = {
        """linear""": PIL.Image.LINEAR,
        """bilinear""": PIL.Image.BILINEAR,
        """bicubic""": PIL.Image.BICUBIC,
        """lanczos""": PIL.Image.LANCZOS,
        """nearest""": PIL.Image.NEAREST,
    }
def pt_to_pil(lowerCamelCase):
    """Convert a batch of torch images in [-1, 1], layout (B, C, H, W), to PIL images.

    The scrambled original was named ``_A`` (immediately shadowed by the next
    ``_A``) and called the then-undefined ``numpy_to_pil`` — restored the
    diffusers naming so the call resolves.
    """
    images = (lowerCamelCase / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images
def numpy_to_pil(lowerCamelCase):
    """Convert a numpy image batch (B, H, W, C) with values in [0, 1] to PIL images.

    Fixes vs the scrambled original: the function was named ``_A`` while its
    caller uses ``numpy_to_pil``, and the multi-channel branch passed the whole
    batch to ``Image.fromarray`` instead of each ``image``.
    """
    if lowerCamelCase.ndim == 3:
        # Single image: add a batch dimension.
        lowerCamelCase = lowerCamelCase[None, ...]
    images = (lowerCamelCase * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
| 629 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
    # Pillow >= 9.1 moved the resampling filters into the PIL.Image.Resampling enum.
    SCREAMING_SNAKE_CASE__ : int = {
        """linear""": PIL.Image.Resampling.BILINEAR,
        """bilinear""": PIL.Image.Resampling.BILINEAR,
        """bicubic""": PIL.Image.Resampling.BICUBIC,
        """lanczos""": PIL.Image.Resampling.LANCZOS,
        """nearest""": PIL.Image.Resampling.NEAREST,
    }
else:
    # Older Pillow exposes the same filters as module-level constants.
    SCREAMING_SNAKE_CASE__ : Dict = {
        """linear""": PIL.Image.LINEAR,
        """bilinear""": PIL.Image.BILINEAR,
        """bicubic""": PIL.Image.BICUBIC,
        """lanczos""": PIL.Image.LANCZOS,
        """nearest""": PIL.Image.NEAREST,
    }
def pt_to_pil(lowerCamelCase):
    """Convert a batch of torch images in [-1, 1], layout (B, C, H, W), to PIL images.

    The scrambled original was named ``_A`` (immediately shadowed by the next
    ``_A``) and called the then-undefined ``numpy_to_pil`` — restored the
    diffusers naming so the call resolves.
    """
    images = (lowerCamelCase / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images
def numpy_to_pil(lowerCamelCase):
    """Convert a numpy image batch (B, H, W, C) with values in [0, 1] to PIL images.

    Fixes vs the scrambled original: the function was named ``_A`` while its
    caller uses ``numpy_to_pil``, and the multi-channel branch passed the whole
    batch to ``Image.fromarray`` instead of each ``image``.
    """
    if lowerCamelCase.ndim == 3:
        # Single image: add a batch dimension.
        lowerCamelCase = lowerCamelCase[None, ...]
    images = (lowerCamelCase * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
| 629 | 1 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class __lowerCAmelCase:
    """Mixin exercising feature-extractor (de)serialization.

    Subclasses are expected to define ``feature_extraction_class`` and
    ``feat_extract_dict``.  Fixes vs the scrambled original: all four test
    methods shared the name ``_snake_case`` (so only the last survived), and
    several bodies referenced undefined names (``snake_case``, the temp-dir
    variable).  Method names restored per the upstream saving-test mixin.
    """

    _UpperCamelCase : Union[str, Any] = None

    def test_feat_extract_to_json_string(self) -> None:
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self) -> None:
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self) -> None:
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self) -> None:
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
| 629 |
# Lint as: python3
import itertools
import os
import re
# Regexes used to convert dataset names between CamelCase and snake_case.
# The scrambled original bound all six values to the same throwaway constant,
# while the functions below reference these private names.
_uppercase_uppercase_re = re.compile(R"""([A-Z]+)([A-Z][a-z])""")
_lowercase_uppercase_re = re.compile(R"""([a-z\d])([A-Z])""")
_single_underscore_re = re.compile(R"""(?<!_)_(?!_)""")
_multiple_underscores_re = re.compile(R"""(_{2,})""")
# A valid split name: word characters, optionally dot-separated.
_split_re = R"""^\w+(\.\w+)*$"""
# Characters that are not allowed in Windows file paths.
INVALID_WINDOWS_CHARACTERS_IN_PATH = R"""<>:/\|?*"""
# Preserve the original (scrambled) final binding for any external reference.
SCREAMING_SNAKE_CASE__ = INVALID_WINDOWS_CHARACTERS_IN_PATH
def camelcase_to_snakecase(lowerCamelCase):
    """Convert a CamelCase dataset name to snake_case.

    Fixes vs the scrambled original: the function was named ``_A`` while its
    caller uses ``camelcase_to_snakecase``, the second substitution was applied
    to the raw input (dropping the first), and the return referenced an
    undefined ``name``.
    """
    name = _uppercase_uppercase_re.sub(r"\1_\2", lowerCamelCase)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()
def snakecase_to_camelcase(lowerCamelCase):
    """Convert a snake_case name to CamelCase, keeping double-underscore groups.

    Fixes vs the scrambled original: intermediate results were bound to
    throwaway names while the comprehension iterated the raw input string
    character by character, and the final chain ran over the wrong variable.
    """
    name = _single_underscore_re.split(lowerCamelCase)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")
def filename_prefix_for_name(lowerCamelCase):
    """Return the snake_case filename prefix for a dataset *name*.

    Raises ValueError when *name* looks like a path rather than a bare name.
    (The scrambled original compared against, and formatted, an undefined
    ``name`` variable.)
    """
    if os.path.basename(lowerCamelCase) != lowerCamelCase:
        raise ValueError(F"""Should be a dataset name, not a path: {lowerCamelCase}""")
    return camelcase_to_snakecase(lowerCamelCase)
def filename_prefix_for_split(name, split):
    """Return ``"<snake_case_name>-<split>"`` after validating both parts.

    The scrambled original declared two parameters with the same name (a
    SyntaxError); the body's surviving references (``name``, ``split``)
    determine the restored parameter names.
    """
    if os.path.basename(name) != name:
        raise ValueError(F"""Should be a dataset name, not a path: {name}""")
    if not re.match(_split_re, split):
        raise ValueError(F"""Split name should match '{_split_re}'' but got '{split}'.""")
    return F"""{filename_prefix_for_name(name)}-{split}"""
def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    """Return a glob pattern matching every shard file of a dataset split.

    The scrambled original had four identically-named parameters (SyntaxError);
    names restored from the visible call order into
    ``filename_prefix_for_split`` and ``os.path.join``.
    """
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += F""".{filetype_suffix}"""
    filepath = os.path.join(data_dir, prefix)
    return F"""{filepath}*"""
def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    """Return the concrete list of shard filenames for a dataset split.

    With *shard_lengths* given, one name per shard in the
    ``prefix-00000-of-000NN`` style; otherwise a single-element list.
    (The scrambled original had five identically-named parameters and bound all
    intermediates to throwaway names.)
    """
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)
    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [F"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + F""".{filetype_suffix}""" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += F""".{filetype_suffix}"""
        return [filename]
| 629 | 1 |
import numpy as np
import qiskit
def bbaa(key_len=8, seed=None):
    """Simulate the BB84 quantum key-distribution protocol.

    Returns a bit-string key of length *key_len*; deterministic for a fixed
    *seed*.  Fixes vs the scrambled original: the function was named ``_A``
    with two identically-named parameters (SyntaxError) while the ``__main__``
    demo calls ``bbaa``; every loop index and intermediate was also undefined.
    """
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)
    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()
    bbaa_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis, bob_basis, result
            )
            if alice_basis_bit == bob_basis_bit
        ]
    )
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key
if __name__ == "__main__":
    # Demo: print a key for a fixed seed, then run the module doctests.
    print(f'The generated key is : {bbaa(8, seed=0)}')
    from doctest import testmod
    testmod()
| 629 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: submodule name -> public names it defines.
# The scrambled original bound this (and the torch-only extension, and the
# final _LazyModule) to throwaway constants while referencing the undefined
# `_import_structure`, so the lazy module never worked.
_import_structure = {"""configuration_sew""": ["""SEW_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SEWConfig"""]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code is only importable when torch is installed.
    _import_structure["modeling_sew"] = [
        """SEW_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """SEWForCTC""",
        """SEWForSequenceClassification""",
        """SEWModel""",
        """SEWPreTrainedModel""",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 629 | 1 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
SCREAMING_SNAKE_CASE__ : Tuple = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_1_2,
}
SCREAMING_SNAKE_CASE__ : Dict = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-question_encoder-multiset-base""": 5_1_2,
}
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"""facebook/dpr-reader-single-nq-base""": 5_1_2,
"""facebook/dpr-reader-multiset-base""": 5_1_2,
}
SCREAMING_SNAKE_CASE__ : str = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
SCREAMING_SNAKE_CASE__ : Tuple = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class __lowerCAmelCase(_UpperCamelCase):
    """Tokenizer for DPR context encoders (a BertTokenizer with DPR resources).

    Fixes vs the scrambled original: all four class attributes were bound to
    the same name ``_UpperCamelCase``, so only the last survived.  Restored the
    slow-tokenizer attribute names expected by the PreTrainedTokenizer base
    class — confirm against the installed transformers version.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __lowerCAmelCase(_UpperCamelCase):
    """Tokenizer for DPR question encoders (a BertTokenizer with DPR resources).

    Fixes vs the scrambled original: all four class attributes were bound to
    the same name ``_UpperCamelCase``, so only the last survived.  Restored the
    slow-tokenizer attribute names expected by the PreTrainedTokenizer base
    class — confirm against the installed transformers version.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
# Result containers for the DPR reader: per-span predictions and raw logits.
# NOTE(review): both namedtuples are (re)bound to the same scrambled constant
# name; upstream exposes them as DPRSpanPrediction / DPRReaderOutput — confirm
# before relying on these bindings.
SCREAMING_SNAKE_CASE__ : Any = collections.namedtuple(
    """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
SCREAMING_SNAKE_CASE__ : List[str] = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
SCREAMING_SNAKE_CASE__ : Tuple = R"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(_UpperCamelCase )
class __lowerCAmelCase :
    """Mixin adding DPR-reader encoding and best-span decoding to a tokenizer.

    Relies on tokenizer attributes/methods (`pad_token_id`, `sep_token_id`,
    `pad`, `decode`, `super().__call__`) supplied by the class it is mixed into.

    BUG FIXES vs. the previous revision:
    - every parameter of `__call__` shared the name `snake_case` (a SyntaxError);
    - the span-sorting lambda read an undefined name `x`;
    - both methods were named `_snake_case`, so the internal call to
      `self._get_best_spans` could never resolve.
    """

    def __call__(
        self,
        questions,
        titles = None,
        texts = None,
        padding = False,
        truncation = False,
        max_length = None,
        return_tensors = None,
        return_attention_mask = None,
        **kwargs,
    ) -> BatchEncoding:
        """Encode question/title/text triples as `[CLS] question [SEP] title [SEP] text`."""
        # With no passages at all, behave exactly like the plain tokenizer.
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            # Only one of titles/texts given: encode it as the text pair.
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        # A single question is duplicated for every passage.
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                F"""There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."""
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                # Attend to every non-padding token.
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def _snake_case(
        self,
        reader_input,
        reader_output,
        num_spans = 16,
        max_answer_length = 64,
        num_spans_per_passage = 4,
    ) -> List[DPRSpanPrediction]:
        """Return up to `num_spans` best answer spans, visiting passages by relevance.

        `reader_input` is the dict produced by `__call__`; `reader_output` holds
        (start_logits, end_logits, relevance_logits) per passage.
        """
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        # Visit passages from most to least relevant.
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits,
        end_logits,
        max_answer_length,
        top_spans,
    ) -> List[DPRSpanPrediction]:
        """Find the `top_spans` highest-scoring non-overlapping spans in one passage."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        # Highest combined start+end score first (lambda previously read an undefined name).
        scores = sorted(scores, key=lambda item: item[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(F"""Span is too long: {length} > {max_answer_length}""")
            # Skip spans overlapping an already-chosen (higher-scoring) span.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(_UpperCamelCase )
class __lowerCAmelCase ( _UpperCamelCase ,_UpperCamelCase ):
    """Fast DPR reader tokenizer: the reader mixin combined with the base fast tokenizer.

    NOTE(review): the READER_* constants referenced here are not bound under
    these names in the visible code — TODO confirm where they are defined.
    """

    _UpperCamelCase : Union[str, Any] = VOCAB_FILES_NAMES
    _UpperCamelCase : Optional[Any] = READER_PRETRAINED_VOCAB_FILES_MAP
    _UpperCamelCase : Optional[int] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCamelCase : List[str] = READER_PRETRAINED_INIT_CONFIGURATION
    # Model input names expected by the DPR reader forward pass.
    _UpperCamelCase : List[Any] = ["""input_ids""", """attention_mask"""]
| 629 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Import structure handed to `_LazyModule`: heavyweight submodules are only
# imported on first attribute access.
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is optional: simply omit the modeling entries.
    pass
else:
    # BUG FIX: this list was previously bound to an unrelated module-level name,
    # leaving `_import_structure` (read below) undefined and incomplete.
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers.
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )
else:
    import sys

    # BUG FIX: the lazy module must replace this module in `sys.modules`;
    # binding it to a throwaway name made the lazy machinery a no-op.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 629 | 1 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
# Iris dataset: 150 flower samples, 4 features each, 3 target classes.
SCREAMING_SNAKE_CASE__ : str = datasets.load_iris()
# NOTE(review): the lines below read `data`, `X` and `y`, but the values above
# are bound to `SCREAMING_SNAKE_CASE__` — TODO confirm the intended names.
SCREAMING_SNAKE_CASE__ : List[Any] = np.array(data["""data"""])
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array(data["""target"""])
SCREAMING_SNAKE_CASE__ : Dict = data["""target_names"""]
# Random train/test split (default 75/25).
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = train_test_split(X, y)
def _A ( lowerCamelCase , lowerCamelCase ):
return np.linalg.norm(np.array(lowerCamelCase ) - np.array(lowerCamelCase ) )
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=5 ):
a__ : Dict = zip(lowerCamelCase , lowerCamelCase )
# List of distances of all points from the point to be classified
a__ : List[str] = []
for data_point in data:
a__ : str = euclidean_distance(data_point[0] , lowerCamelCase )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
a__ : Tuple = [i[1] for i in sorted(lowerCamelCase )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
a__ : List[Any] = Counter(lowerCamelCase ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
    # Demo classification of a single iris measurement.
    # NOTE(review): `classifier`, `X_train`, `y_train` and `classes` are not
    # bound under these names above — TODO confirm the intended identifiers.
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 629 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
# Context passage fed to the text-question-answering tool in the tests below.
SCREAMING_SNAKE_CASE__ : Dict = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class __lowerCAmelCase ( unittest.TestCase ,_UpperCamelCase ):
    """Tests for the text-question-answering tool (local and remote variants).

    NOTE(review): all methods share the name `_snake_case`, so earlier
    definitions are shadowed and, lacking a `test_` prefix, none run under
    unittest; several call sites also read `snake_case` and `self.tool`/
    `self.remote_tool`, which are never bound under those names — TODO confirm
    the intended identifiers.
    """

    def _snake_case ( self ) -> str:
        """Load the local and remote tools used by the other methods."""
        a__ : Optional[int] = load_tool("text-question-answering" )
        self.tool.setup()
        a__ : Dict = load_tool("text-question-answering" , remote=snake_case )
    def _snake_case ( self ) -> Dict:
        """Local tool, positional arguments."""
        a__ : Optional[Any] = self.tool(snake_case , "What did Hugging Face do in April 2021?" )
        self.assertEqual(snake_case , "launched the BigScience Research Workshop" )
    def _snake_case ( self ) -> Tuple:
        """Remote tool, positional arguments."""
        a__ : List[Any] = self.remote_tool(snake_case , "What did Hugging Face do in April 2021?" )
        self.assertEqual(snake_case , "launched the BigScience Research Workshop" )
    def _snake_case ( self ) -> Any:
        """Local tool, keyword arguments."""
        a__ : Any = self.tool(text=snake_case , question="What did Hugging Face do in April 2021?" )
        self.assertEqual(snake_case , "launched the BigScience Research Workshop" )
    def _snake_case ( self ) -> int:
        """Remote tool, keyword arguments."""
        a__ : List[str] = self.remote_tool(text=snake_case , question="What did Hugging Face do in April 2021?" )
        self.assertEqual(snake_case , "launched the BigScience Research Workshop" )
| 629 | 1 |
import json
import sys
def _A ( lowerCamelCase , lowerCamelCase ):
with open(lowerCamelCase , encoding="utf-8" ) as f:
a__ : Union[str, Any] = json.load(lowerCamelCase )
a__ : str = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
for benchmark_name in sorted(lowerCamelCase ):
a__ : Any = results[benchmark_name]
a__ : int = benchmark_name.split("/" )[-1]
output_md.append(F"""### Benchmark: {benchmark_file_name}""" )
a__ : Any = "| metric |"
a__ : int = "|--------|"
a__ : Optional[int] = "| new / old (diff) |"
for metric_name in sorted(lowerCamelCase ):
a__ : Optional[Any] = benchmark_res[metric_name]
a__ : Tuple = metric_vals["new"]
a__ : Any = metric_vals.get("old" , lowerCamelCase )
a__ : Optional[int] = metric_vals.get("diff" , lowerCamelCase )
a__ : Optional[Any] = F""" {new_val:f}""" if isinstance(lowerCamelCase , (int, float) ) else "None"
if old_val is not None:
val_str += F""" / {old_val:f}""" if isinstance(lowerCamelCase , (int, float) ) else "None"
if dif_val is not None:
val_str += F""" ({dif_val:f})""" if isinstance(lowerCamelCase , (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append("</details>" )
with open(lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.writelines("\n".join(lowerCamelCase ) )
if __name__ == "__main__":
    # CLI usage: <script> <input_json_file> <output_md_file>
    # NOTE(review): `format_json_to_md`, `input_json_file` and `output_md_file`
    # are not bound under these names above (the function is `_A`, and argv is
    # stored in `SCREAMING_SNAKE_CASE__`) — TODO confirm intended identifiers.
    SCREAMING_SNAKE_CASE__ : List[Any] = sys.argv[1]
    SCREAMING_SNAKE_CASE__ : Optional[int] = sys.argv[2]
    format_json_to_md(input_json_file, output_md_file)
| 629 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __lowerCAmelCase ( _UpperCamelCase ):
    """Offline-mode regression tests.

    Each test runs a small transformers program in a subprocess whose socket is
    monkey-patched to fail, and asserts that cached files plus
    TRANSFORMERS_OFFLINE=1 keep it working (or fail with the expected error).

    BUG FIXES vs. the previous revision: locals were bound to a throwaway name
    while later statements read `load`/`run`/`mock`/`cmd`/`env`/`result`
    (NameError); `subprocess.run` received an undefined `check` value; and all
    five methods shared one non-`test_` name, so unittest never ran any of them.
    """

    @require_torch
    def test_offline_mode(self) -> None:
        """Cached model + TRANSFORMERS_OFFLINE=1 must work when sockets raise RuntimeError."""
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "
        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        # check=False: the returncode is asserted explicitly below.
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self) -> None:
        """Cached model must work even with a flaky-internet socket, without the env var."""
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self) -> None:
        """Sharded checkpoints must also load from cache in offline mode."""
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self) -> None:
        """`pipeline()` without an explicit task must fail clearly in offline mode."""
        load = "\nfrom transformers import pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_mode_dynamic_model(self) -> None:
        """Models with remote code (`trust_remote_code=True`) must load from cache offline."""
        load = "\nfrom transformers import AutoModel\n "
        run = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
| 629 | 1 |
from ..utils import DummyObject, requires_backends
# Placeholder ("dummy") classes whose constructor raises a helpful error when
# the optional `sentencepiece` backend is not installed.
# NOTE(review): every class below shares the name `__lowerCAmelCase`, so each
# definition shadows the previous one and only the last binding survives —
# TODO confirm the intended distinct class names.
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    _UpperCamelCase : Dict = ["""sentencepiece"""]
    def __init__( self , *snake_case , **snake_case ) -> Optional[int]:
        """simple docstring"""
        requires_backends(self , ["sentencepiece"] )
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    _UpperCamelCase : str = ["""sentencepiece"""]
    def __init__( self , *snake_case , **snake_case ) -> List[Any]:
        """simple docstring"""
        requires_backends(self , ["sentencepiece"] )
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    _UpperCamelCase : List[str] = ["""sentencepiece"""]
    def __init__( self , *snake_case , **snake_case ) -> Tuple:
        """simple docstring"""
        requires_backends(self , ["sentencepiece"] )
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    _UpperCamelCase : List[Any] = ["""sentencepiece"""]
    def __init__( self , *snake_case , **snake_case ) -> Union[str, Any]:
        """simple docstring"""
        requires_backends(self , ["sentencepiece"] )
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    _UpperCamelCase : Any = ["""sentencepiece"""]
    def __init__( self , *snake_case , **snake_case ) -> Optional[Any]:
        """simple docstring"""
        requires_backends(self , ["sentencepiece"] )
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    _UpperCamelCase : Optional[Any] = ["""sentencepiece"""]
    def __init__( self , *snake_case , **snake_case ) -> List[Any]:
        """simple docstring"""
        requires_backends(self , ["sentencepiece"] )
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    _UpperCamelCase : List[str] = ["""sentencepiece"""]
    def __init__( self , *snake_case , **snake_case ) -> str:
        """simple docstring"""
        requires_backends(self , ["sentencepiece"] )
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    _UpperCamelCase : Optional[Any] = ["""sentencepiece"""]
    def __init__( self , *snake_case , **snake_case ) -> Tuple:
        """simple docstring"""
        requires_backends(self , ["sentencepiece"] )
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    _UpperCamelCase : int = ["""sentencepiece"""]
    def __init__( self , *snake_case , **snake_case ) -> str:
        """simple docstring"""
        requires_backends(self , ["sentencepiece"] )
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    _UpperCamelCase : Any = ["""sentencepiece"""]
    def __init__( self , *snake_case , **snake_case ) -> Any:
        """simple docstring"""
        requires_backends(self , ["sentencepiece"] )
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    _UpperCamelCase : int = ["""sentencepiece"""]
    def __init__( self , *snake_case , **snake_case ) -> List[str]:
        """simple docstring"""
        requires_backends(self , ["sentencepiece"] )
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    _UpperCamelCase : str = ["""sentencepiece"""]
    def __init__( self , *snake_case , **snake_case ) -> Optional[int]:
        """simple docstring"""
        requires_backends(self , ["sentencepiece"] )
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    _UpperCamelCase : Dict = ["""sentencepiece"""]
    def __init__( self , *snake_case , **snake_case ) -> Optional[int]:
        """simple docstring"""
        requires_backends(self , ["sentencepiece"] )
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    _UpperCamelCase : Union[str, Any] = ["""sentencepiece"""]
    def __init__( self , *snake_case , **snake_case ) -> int:
        """simple docstring"""
        requires_backends(self , ["sentencepiece"] )
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    _UpperCamelCase : int = ["""sentencepiece"""]
    def __init__( self , *snake_case , **snake_case ) -> List[Any]:
        """simple docstring"""
        requires_backends(self , ["sentencepiece"] )
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    _UpperCamelCase : Optional[Any] = ["""sentencepiece"""]
    def __init__( self , *snake_case , **snake_case ) -> Optional[Any]:
        """simple docstring"""
        requires_backends(self , ["sentencepiece"] )
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    _UpperCamelCase : int = ["""sentencepiece"""]
    def __init__( self , *snake_case , **snake_case ) -> Optional[int]:
        """simple docstring"""
        requires_backends(self , ["sentencepiece"] )
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    _UpperCamelCase : Optional[Any] = ["""sentencepiece"""]
    def __init__( self , *snake_case , **snake_case ) -> int:
        """simple docstring"""
        requires_backends(self , ["sentencepiece"] )
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    _UpperCamelCase : List[Any] = ["""sentencepiece"""]
    def __init__( self , *snake_case , **snake_case ) -> List[Any]:
        """simple docstring"""
        requires_backends(self , ["sentencepiece"] )
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    _UpperCamelCase : Optional[Any] = ["""sentencepiece"""]
    def __init__( self , *snake_case , **snake_case ) -> List[str]:
        """simple docstring"""
        requires_backends(self , ["sentencepiece"] )
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    _UpperCamelCase : Union[str, Any] = ["""sentencepiece"""]
    def __init__( self , *snake_case , **snake_case ) -> Union[str, Any]:
        """simple docstring"""
        requires_backends(self , ["sentencepiece"] )
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    _UpperCamelCase : List[Any] = ["""sentencepiece"""]
    def __init__( self , *snake_case , **snake_case ) -> List[Any]:
        """simple docstring"""
        requires_backends(self , ["sentencepiece"] )
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    _UpperCamelCase : Dict = ["""sentencepiece"""]
    def __init__( self , *snake_case , **snake_case ) -> List[str]:
        """simple docstring"""
        requires_backends(self , ["sentencepiece"] )
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    _UpperCamelCase : Dict = ["""sentencepiece"""]
    def __init__( self , *snake_case , **snake_case ) -> Optional[int]:
        """simple docstring"""
        requires_backends(self , ["sentencepiece"] )
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    _UpperCamelCase : int = ["""sentencepiece"""]
    def __init__( self , *snake_case , **snake_case ) -> List[str]:
        """simple docstring"""
        requires_backends(self , ["sentencepiece"] )
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    _UpperCamelCase : Dict = ["""sentencepiece"""]
    def __init__( self , *snake_case , **snake_case ) -> int:
        """simple docstring"""
        requires_backends(self , ["sentencepiece"] )
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    _UpperCamelCase : int = ["""sentencepiece"""]
    def __init__( self , *snake_case , **snake_case ) -> List[Any]:
        """simple docstring"""
        requires_backends(self , ["sentencepiece"] )
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    _UpperCamelCase : Optional[Any] = ["""sentencepiece"""]
    def __init__( self , *snake_case , **snake_case ) -> int:
        """simple docstring"""
        requires_backends(self , ["sentencepiece"] )
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    _UpperCamelCase : Tuple = ["""sentencepiece"""]
    def __init__( self , *snake_case , **snake_case ) -> str:
        """simple docstring"""
        requires_backends(self , ["sentencepiece"] )
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    _UpperCamelCase : List[Any] = ["""sentencepiece"""]
    def __init__( self , *snake_case , **snake_case ) -> List[str]:
        """simple docstring"""
        requires_backends(self , ["sentencepiece"] )
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
    _UpperCamelCase : Optional[Any] = ["""sentencepiece"""]
    def __init__( self , *snake_case , **snake_case ) -> Optional[Any]:
        """simple docstring"""
        requires_backends(self , ["sentencepiece"] )
| 629 |
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

# Import structure handed to `_LazyModule`: heavyweight submodules are only
# imported on first attribute access.
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is optional: simply omit the modeling entries.
    pass
else:
    # BUG FIX: this list was previously bound to an unrelated module-level name,
    # leaving `_import_structure` (read below) incomplete.
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers.
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )
else:
    import sys

    # BUG FIX: the lazy module must replace this module in `sys.modules`;
    # binding it to a throwaway name made the lazy machinery a no-op.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 629 | 1 |
from __future__ import annotations
from cmath import sqrt
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if a == 0:
raise ValueError("Coefficient 'a' must not be zero." )
a__ : Any = b * b - 4 * a * c
a__ : str = (-b + sqrt(lowerCamelCase )) / (2 * a)
a__ : Union[str, Any] = (-b - sqrt(lowerCamelCase )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def _A ( ):
a__ , a__ : Optional[int] = quadratic_roots(a=5 , b=6 , c=1 )
print(F"""The solutions are: {solutiona} and {solutiona}""" )
if __name__ == "__main__":
main()
| 629 |
from PIL import Image
def _A ( lowerCamelCase , lowerCamelCase ):
def brightness(lowerCamelCase ) -> float:
return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError("level must be between -255.0 (black) and 255.0 (white)" )
return img.point(lowerCamelCase )
if __name__ == "__main__":
    # Load image
    with Image.open("""image_data/lena.jpg""") as img:
        # Change brightness to 100
        # NOTE(review): `change_brightness` is not bound above (the function is
        # `_A`), and `brigt_img` below reads a different name than the one
        # assigned here — TODO confirm the intended identifiers.
        SCREAMING_SNAKE_CASE__ : List[str] = change_brightness(img, 1_0_0)
        brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
| 629 | 1 |
def _A ( lowerCamelCase ):
if num < 0:
return False
a__ : int = num
a__ : int = 0
while num > 0:
a__ : Dict = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 629 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
# Official OpenAI Whisper checkpoint URLs, keyed by model size.
# The second-to-last URL path segment is the file's SHA-256 digest,
# which the download helper below verifies.
SCREAMING_SNAKE_CASE__ : List[str] = {
    """tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
    """tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
    """base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
    """base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
    """small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
    """small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
    """medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
    """medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
    """large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
    """large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def _A(state_dict) -> None:
    """Drop the bare ``layers``/``blocks`` entries from *state_dict* in place.

    These keys are container artifacts of the original checkpoint and have
    no counterpart in the HF model.  Missing keys are ignored.
    """
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        # The renamed original popped the dict itself as the key; the loop
        # variable `k` is what must be removed, with a None default.
        state_dict.pop(k, None)
# Substring rename table: OpenAI Whisper state-dict fragments (keys) to
# their Hugging Face equivalents (values).  Applied by the rename helper
# below via str.replace on every state-dict key.
SCREAMING_SNAKE_CASE__ : List[str] = {
    """blocks""": """layers""",
    """mlp.0""": """fc1""",
    """mlp.2""": """fc2""",
    """mlp_ln""": """final_layer_norm""",
    """.attn.query""": """.self_attn.q_proj""",
    """.attn.key""": """.self_attn.k_proj""",
    """.attn.value""": """.self_attn.v_proj""",
    """.attn_ln""": """.self_attn_layer_norm""",
    """.attn.out""": """.self_attn.out_proj""",
    """.cross_attn.query""": """.encoder_attn.q_proj""",
    """.cross_attn.key""": """.encoder_attn.k_proj""",
    """.cross_attn.value""": """.encoder_attn.v_proj""",
    """.cross_attn_ln""": """.encoder_attn_layer_norm""",
    """.cross_attn.out""": """.encoder_attn.out_proj""",
    """decoder.ln.""": """decoder.layer_norm.""",
    """encoder.ln.""": """encoder.layer_norm.""",
    """token_embedding""": """embed_tokens""",
    """encoder.positional_embedding""": """encoder.embed_positions.weight""",
    """decoder.positional_embedding""": """decoder.embed_positions.weight""",
    """ln_post""": """layer_norm""",
}
def _A(s_dict):
    """Rename every key of *s_dict* (in place) using WHISPER_MAPPING.

    Each mapping entry whose key occurs as a substring of a state-dict key
    is replaced by its HF counterpart; the renamed entry keeps its tensor.
    Returns the same dict for convenience.
    """
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"""{key} -> {new_key}""")
        # Move the value under the rewritten key.
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def _A(emb):
    """Build an ``nn.Linear`` (no bias) whose weight is tied to *emb*.

    The returned layer shares the embedding's weight tensor, implementing
    the usual tied output projection.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Replace the freshly-initialized weight with the embedding matrix.
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _A(url: str, root: str) -> bytes:
    """Download *url* into directory *root* and return the file's bytes.

    The expected SHA-256 digest is read from the second-to-last URL path
    segment (where OpenAI embeds it).  An existing file with a matching
    digest is reused; a mismatching one triggers a re-download.

    Raises:
        RuntimeError: if the target path exists but is not a regular file,
            or if the downloaded bytes fail the checksum.
    """
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"""{download_target} exists and is not a regular file""")
    if os.path.isfile(download_target):
        # `with` closes the handle promptly; the original leaked it.
        with open(download_target, "rb") as f:
            model_bytes = f.read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        warnings.warn(f"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""")
    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    with open(download_target, "rb") as f:
        model_bytes = f.read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        # Fixed doubled word ("does not not match") in the error message.
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.")
    return model_bytes
def _A(checkpoint_path, pytorch_dump_folder_path):
    """Convert an OpenAI Whisper checkpoint to WhisperForConditionalGeneration.

    Args:
        checkpoint_path: a local ``.pt`` path, or a model-size key of
            ``_MODELS`` (e.g. ``"tiny"``) to download first.
        pytorch_dump_folder_path: output directory for ``save_pretrained``.

    Raises:
        ValueError: if state-dict keys beyond the positional-embedding
            weights fail to load.
    """
    if ".pt" not in checkpoint_path:
        # NOTE(review): `_download` returns raw bytes; confirm whether a
        # torch.load of those bytes is expected downstream.
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    # Kept before renaming: the tied output projection weight.
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        # Fixed: the head count must come from "n_text_head"; the original
        # read "n_text_state" (the hidden size) here.
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f""" but all the following weights are missing {missing}""")
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Tuple = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Patht to the downloaded checkpoints""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 629 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger.
# NOTE(review): the two assignments below bind the same name, so the
# logger binding is immediately lost — presumably distinct names (logger
# and the archive map) before mechanical renaming.
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)
# Pretrained-config archive map: model id -> config.json URL.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
    """facebook/xlm-roberta-xl""": """https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json""",
    """facebook/xlm-roberta-xxl""": """https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json""",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class __lowerCAmelCase(_UpperCamelCase):
    """Configuration for XLM-RoBERTa-XL models.

    Defaults reproduce the ``facebook/xlm-roberta-xl`` architecture.  The
    mechanically-renamed original gave every ``__init__`` parameter the
    same name (a SyntaxError); names are restored from the attributes the
    body assigns.
    """

    # Model-type identifier consumed by the auto-config machinery.
    _UpperCamelCase: int = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250_880,
        hidden_size=2_560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10_240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ) -> None:
        # Special token ids are forwarded to the base config.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __lowerCAmelCase(_UpperCamelCase):
    @property
    def _snake_case(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axes mapping for ONNX export, keyed by input name.

        Multiple-choice tasks carry an extra "choice" axis between batch
        and sequence; all other tasks use (batch, sequence).
        """
        dynamic_axis = (
            {0: "batch", 1: "choice", 2: "sequence"}
            if self.task == "multiple-choice"
            else {0: "batch", 1: "sequence"}
        )
        return OrderedDict((name, dynamic_axis) for name in ("input_ids", "attention_mask"))
| 629 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger.
# NOTE(review): the map below rebinds the same name as the logger —
# presumably two distinct names before mechanical renaming.
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)
# Pretrained-config archive map: model id -> config.json URL.
SCREAMING_SNAKE_CASE__ : str = {
    """huggingface/informer-tourism-monthly""": (
        """https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class __lowerCAmelCase(_UpperCamelCase):
    """Configuration for Informer time-series transformer models.

    The mechanically-renamed original gave every ``__init__`` parameter
    the same name (a SyntaxError) and both class attributes the same name
    (the second shadowed the first); names are restored from the
    attributes the body assigns and the base-class contract.
    """

    # Restored: the two class attributes previously shadowed each other.
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ) -> None:
        # Time-series specific configuration.
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration.
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Informer-specific attention options.
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Extra per-time-step features appended to the raw input.

        Restored name: the constructor reads ``self._number_of_features``,
        so the property must carry that name.
        """
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 629 | 1 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowerCAmelCase(_UpperCamelCase):
    """Two-stage pipeline: segment an image region from a text prompt with
    CLIPSeg, then inpaint that region with Stable Diffusion.

    The mechanically-renamed original collapsed every parameter to
    ``snake_case`` (a SyntaxError) and every method to ``_snake_case``
    (later definitions shadowed earlier ones); names are restored from
    the ``register_modules`` call and the internal call sites, which
    still use the real names.
    """

    def __init__(
        self,
        segmentation_model,
        segmentation_processor,
        vae,
        text_encoder,
        tokenizer,
        unet,
        scheduler,
        safety_checker,
        feature_extractor,
    ) -> None:
        super().__init__()
        # Older scheduler configs used steps_offset != 1; warn and patch.
        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
                f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)
        # Likewise force skip_prk_steps on for PNDM-style schedulers.
        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)
        if safety_checker is None:
            logger.warning(
                f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 .")
        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size="auto") -> None:
        """Compute attention in slices to save memory; "auto" halves the head dim."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self) -> None:
        """Disable sliced attention and compute it in one pass again."""
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self) -> None:
        """Offload submodules to CPU, moving each to GPU only while it runs."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device("cuda")
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """Device the pipeline actually executes on (accelerate-hook aware)."""
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt,
        image,
        text,
        height=512,
        width=512,
        num_inference_steps=50,
        guidance_scale=7.5,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
        callback=None,
        callback_steps=1,
        **kwargs,
    ):
        """Segment the region described by *text* in *image*, then inpaint it
        according to *prompt*; remaining arguments are forwarded to the
        inpainting pipeline.
        """
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt").to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)
        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
SCREAMING_SNAKE_CASE__ : List[Any] = """sshleifer/bart-tiny-random"""
SCREAMING_SNAKE_CASE__ : Tuple = """patrickvonplaten/t5-tiny-random"""
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
    # NOTE(review): every method below is named `_snake_case`, so each
    # definition shadows the previous one and unittest discovers none of
    # them as tests (no `test_` prefix).  The bodies also reference the
    # unbound `snake_case` (model ids / expected values) and assert on
    # `student` while binding results to `a__` — all apparent artifacts
    # of mechanical renaming; compare with the original make_student
    # tests before relying on this class.
    @cached_property
    def _snake_case ( self ) -> List[Any]:
        """Teacher configuration loaded from a tiny pretrained checkpoint."""
        return AutoConfig.from_pretrained(snake_case )
    def _snake_case ( self ) -> Any:
        """Student with one encoder and one decoder layer."""
        a__ , *a__ : int = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.num_hidden_layers , 1 )
    def _snake_case ( self ) -> str:
        """Student with one encoder layer and a copied decoder."""
        a__ , *a__ : int = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=snake_case )
    def _snake_case ( self ) -> List[str]:
        """Shrunken encoder; decoder depth should match the teacher's."""
        a__ , *a__ : Any = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=snake_case )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
    def _snake_case ( self ) -> Optional[int]:
        """Both encoder and decoder reduced to a single layer."""
        a__ , *a__ : List[str] = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , 1 )
    def _snake_case ( self ) -> int:
        """Creating a student with neither depth specified must raise."""
        with self.assertRaises(snake_case ):
            create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=snake_case , d=snake_case )
| 629 | 1 |
def _A(sentence: str) -> str:
    """Reverse every word longer than four characters; keep shorter words.

    Words are whitespace-separated; single spaces join the result.
    """
    # Fixed: the original tested the length of the whole sentence for
    # every word, so short sentences were never transformed and long
    # sentences had ALL words reversed.
    return " ".join(word[::-1] if len(word) > 4 else word for word in sentence.split())


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(_A("""Hey wollef sroirraw"""))
| 629 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
SCREAMING_SNAKE_CASE__ : Dict = """CompVis/stable-diffusion-v1-1"""
SCREAMING_SNAKE_CASE__ : Dict = """CompVis/stable-diffusion-v1-2"""
SCREAMING_SNAKE_CASE__ : Tuple = """CompVis/stable-diffusion-v1-3"""
SCREAMING_SNAKE_CASE__ : str = """CompVis/stable-diffusion-v1-4"""
class __lowerCAmelCase ( _UpperCamelCase ):
    # NOTE(review): this pipeline runs one prompt through four Stable
    # Diffusion v1.x checkpoints and collects the first image of each.
    # Renaming damage to confirm against the original community
    # "comparison" pipeline: all public methods share the name
    # `_snake_case` (later defs shadow earlier ones), the four pipeline
    # attributes collapse to `self.pipea`, and the final method calls
    # `self.textaimg_sda_a`, which no surviving method defines.
    def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case = True , ) -> Any:
        """Load three checkpoints from the Hub and assemble a fourth from the given components."""
        # NOTE(review): `_init_` is not a dunder — presumably `__init__`
        # was intended; as written the base initializer never runs.
        super()._init_()
        a__ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(snake_case )
        a__ : Optional[Any] = StableDiffusionPipeline.from_pretrained(snake_case )
        a__ : Optional[Any] = StableDiffusionPipeline.from_pretrained(snake_case )
        a__ : int = StableDiffusionPipeline(
            vae=snake_case , text_encoder=snake_case , tokenizer=snake_case , unet=snake_case , scheduler=snake_case , safety_checker=snake_case , feature_extractor=snake_case , requires_safety_checker=snake_case , )
        self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
    @property
    def _snake_case ( self ) -> Dict[str, Any]:
        """Public components of this pipeline (non-underscore config keys)."""
        return {k: getattr(self , snake_case ) for k in self.config.keys() if not k.startswith("_" )}
    def _snake_case ( self , snake_case = "auto" ) -> Optional[Any]:
        """Enable sliced attention; "auto" halves the attention head dim."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            a__ : List[Any] = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(snake_case )
    def _snake_case ( self ) -> Tuple:
        """Disable sliced attention (compute it in a single pass)."""
        self.enable_attention_slicing(snake_case )
    # The next four methods generate with checkpoints v1.1-v1.4 respectively.
    @torch.no_grad()
    def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Optional[int]:
        """Text-to-image with the v1.1 pipeline."""
        return self.pipea(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
    @torch.no_grad()
    def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict:
        """Text-to-image with the v1.2 pipeline."""
        return self.pipea(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
    @torch.no_grad()
    def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Union[str, Any]:
        """Text-to-image with the v1.3 pipeline."""
        return self.pipea(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
    @torch.no_grad()
    def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict:
        """Text-to-image with the v1.4 pipeline."""
        return self.pipea(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
    @torch.no_grad()
    def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict:
        """Run the prompt through all four checkpoints and bundle the results."""
        a__ : Any = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(snake_case )
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
        # Get first result from Stable Diffusion Checkpoint v1.1
        a__ : Any = self.textaimg_sda_a(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
        # Get first result from Stable Diffusion Checkpoint v1.2
        a__ : List[Any] = self.textaimg_sda_a(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
        # Get first result from Stable Diffusion Checkpoint v1.3
        a__ : Optional[Any] = self.textaimg_sda_a(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
        # Get first result from Stable Diffusion Checkpoint v1.4
        a__ : Dict = self.textaimg_sda_a(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 629 | 1 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class __lowerCAmelCase(nn.Module):
    """Flax 2x nearest-neighbour upsampling followed by a 3x3 convolution.

    Field names restored: the renamed original declared both dataclass
    fields under one name while the body reads ``self.out_channels`` and
    ``self.dtype``; the setup hook must be called ``setup`` for flax to
    invoke it, and ``jnp.floataa`` is the digit-mangled ``jnp.float32``.
    """

    # Number of output channels produced by the convolution.
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # NHWC layout: double the spatial dims, keep batch and channels.
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states
class __lowerCAmelCase(nn.Module):
    """Flax 2x downsampling via a stride-2 3x3 convolution.

    Field names restored from the body's ``self.out_channels`` /
    ``self.dtype`` reads; ``setup`` restored as the flax init hook and
    ``jnp.floataa`` as the digit-mangled ``jnp.float32``.
    """

    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states
class __lowerCAmelCase(nn.Module):
    """Flax ResNet block with timestep-embedding injection.

    Field and attribute names restored: the renamed original collapsed
    all dataclass fields to one name while the body reads
    ``self.in_channels`` / ``self.out_channels`` / ``self.dropout_prob``
    / ``self.use_nin_shortcut`` / ``self.dtype``; ``norma``/``conva``
    are the digit-mangled ``norm1``/``norm2`` and ``conv1``/``conv2``.
    """

    in_channels: int
    out_channels: int = None  # defaults to in_channels when None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None  # auto-detected from channel change when None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels
        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-05)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )
        # Projects the (swish-activated) timestep embedding into the block.
        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)
        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-05)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )
        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            # 1x1 projection so the residual matches the new channel count.
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)
        # Broadcast the projected timestep embedding over both spatial dims.
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb
        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)
        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)
        return hidden_states + residual
| 629 |
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 9.80665
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase = g ):
if fluid_density <= 0:
raise ValueError("Impossible fluid density" )
if volume < 0:
raise ValueError("Impossible Object volume" )
if gravity <= 0:
raise ValueError("Impossible Gravity" )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 629 | 1 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _A ( lowerCamelCase , lowerCamelCase ):
a__ : Dict = old_name
if "patch_embed" in old_name:
a__ , a__ , a__ : Union[str, Any] = old_name.split("." )
if layer == "0":
a__ : Union[str, Any] = old_name.replace("0" , "convolution1" )
elif layer == "1":
a__ : Dict = old_name.replace("1" , "batchnorm_before" )
elif layer == "3":
a__ : List[str] = old_name.replace("3" , "convolution2" )
else:
a__ : Optional[Any] = old_name.replace("4" , "batchnorm_after" )
if "network" in old_name and re.search(r"\d\.\d" , lowerCamelCase ):
a__ : List[str] = r"\b\d{2}\b"
if bool(re.search(lowerCamelCase , lowerCamelCase ) ):
a__ : Optional[int] = re.search(r"\d\.\d\d." , lowerCamelCase ).group()
else:
a__ : Any = re.search(r"\d\.\d." , lowerCamelCase ).group()
if int(match[0] ) < 6:
a__ : List[Any] = old_name.replace(lowerCamelCase , "" )
a__ : int = trimmed_name.replace("network" , match[0] + ".meta4D_layers.blocks." + match[2:-1] )
a__ : List[Any] = "intermediate_stages." + trimmed_name
else:
a__ : Union[str, Any] = old_name.replace(lowerCamelCase , "" )
if int(match[2] ) < num_meta4D_last_stage:
a__ : Optional[Any] = trimmed_name.replace("network" , "meta4D_layers.blocks." + match[2] )
else:
a__ : Union[str, Any] = str(int(match[2] ) - num_meta4D_last_stage )
a__ : str = trimmed_name.replace("network" , "meta3D_layers.blocks." + layer_index )
if "norm1" in old_name:
a__ : List[str] = trimmed_name.replace("norm1" , "layernorm1" )
elif "norm2" in old_name:
a__ : Optional[int] = trimmed_name.replace("norm2" , "layernorm2" )
elif "fc1" in old_name:
a__ : List[str] = trimmed_name.replace("fc1" , "linear_in" )
elif "fc2" in old_name:
a__ : Any = trimmed_name.replace("fc2" , "linear_out" )
a__ : Any = "last_stage." + trimmed_name
elif "network" in old_name and re.search(r".\d." , lowerCamelCase ):
a__ : List[str] = old_name.replace("network" , "intermediate_stages" )
if "fc" in new_name:
a__ : str = new_name.replace("fc" , "convolution" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
a__ : str = new_name.replace("norm1" , "batchnorm_before" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
a__ : Any = new_name.replace("norm2" , "batchnorm_after" )
if "proj" in new_name:
a__ : Optional[int] = new_name.replace("proj" , "projection" )
if "dist_head" in new_name:
a__ : Tuple = new_name.replace("dist_head" , "distillation_classifier" )
elif "head" in new_name:
a__ : Optional[int] = new_name.replace("head" , "classifier" )
elif "patch_embed" in new_name:
a__ : Tuple = "efficientformer." + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
a__ : Union[str, Any] = new_name.replace("norm" , "layernorm" )
a__ : Optional[int] = "efficientformer." + new_name
else:
a__ : List[Any] = "efficientformer.encoder." + new_name
return new_name
# NOTE(review): broken as obfuscated — both parameters are named
# ``lowerCamelCase`` (duplicate argument: SyntaxError) and the body reads the
# undefined names ``checkpoint``/``val``. The renamed-key store (upstream:
# ``checkpoint[rename_key(key, num_meta4D_last_stage)] = val``) was reduced to
# a dead local binding, so as written this would merely empty the dict.
# Left byte-for-byte pending a coordinated repair with the rename helper above.
def _A ( lowerCamelCase , lowerCamelCase ):
    # Intended behaviour: re-key every entry of the state dict in place.
    for key in checkpoint.copy().keys():
        a__ : Optional[Any] = checkpoint.pop(lowerCamelCase )
        a__ : Dict = val
    return checkpoint
def _A():
    """Download and return the standard COCO validation image used for checks.

    Returns:
        A ``PIL.Image`` loaded from the streamed HTTP response.

    NOTE(review): the original zero-argument body passed the undefined name
    ``lowerCamelCase`` to ``requests.get`` for both the URL and the ``stream``
    flag; restored to the local URL variable and ``stream=True``.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    """Convert an original EfficientFormer checkpoint to the HF format.

    Loads the original weights, re-keys them, verifies the image processor
    against the original torchvision preprocessing and the model against
    reference logits, then saves (and optionally pushes) model + processor.

    Args:
        checkpoint_path: path to the original ``.pth`` checkpoint.
        efficientformer_config_file: JSON file for ``EfficientFormerConfig``.
        pytorch_dump_path: output directory for the converted model.
        push_to_hub: when truthy, also push model and processor to the hub.

    Raises:
        ValueError: if the checkpoint name is not an l1/l3/l7 variant.

    NOTE(review): the original def was named ``_A`` with all four parameters
    named ``lowerCamelCase`` (duplicate argument — a SyntaxError), while the
    ``__main__`` block calls ``convert_efficientformer_checkpoint`` with these
    keyword names; the signature was restored to match that call site, and the
    dead ``a__`` bindings were restored to the names the body actually reads.
    """
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])
    # Blocks below this index in the last stage are meta-4D, the rest meta-3D.
    # SOURCE had ``config.num_metaad_blocks`` (digit-mangled); the config
    # attribute is ``num_meta3d_blocks`` — confirm against EfficientFormerConfig.
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)
    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    # The HF image processor must reproduce the original preprocessing exactly.
    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits
    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfuly saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    # NOTE(review): broken as obfuscated — the parser and the parsed args are
    # bound to ``SCREAMING_SNAKE_CASE__`` but then used via the undefined
    # names ``parser``/``args``, and ``convert_efficientformer_checkpoint`` is
    # not defined in this file (the conversion function above is named ``_A``).
    SCREAMING_SNAKE_CASE__ : argparse.ArgumentParser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--pytorch_model_path""",
        default=None,
        type=str,
        required=True,
        help="""Path to EfficientFormer pytorch checkpoint.""",
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The json file for EfficientFormer model config.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    parser.add_argument(
        """--no-push_to_hub""",
        dest="""push_to_hub""",
        action="""store_false""",
        help="""Do not push model and image processor to the hub""",
    )
    # Pushing defaults to True, so ``--push_to_hub`` is effectively a no-op and
    # ``--no-push_to_hub`` is the opt-out switch.
    parser.set_defaults(push_to_hub=True)
    SCREAMING_SNAKE_CASE__ : argparse.Namespace = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
| 629 |
from __future__ import annotations
from random import random
class __lowerCAmelCase :
def __init__( self , snake_case = None ) -> Any:
"""simple docstring"""
a__ : Optional[int] = value
a__ : Tuple = random()
a__ : Node | None = None
a__ : Node | None = None
def __repr__( self ) -> str:
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return F"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self ) -> str:
"""simple docstring"""
a__ : List[Any] = str(self.value ) + " "
a__ : List[str] = str(self.left or "" )
a__ : Tuple = str(self.right or "" )
return value + left + right
def _A ( lowerCamelCase , lowerCamelCase ):
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
a__ , a__ : Dict = split(root.left , lowerCamelCase )
return left, root
else:
a__ , a__ : int = split(root.right , lowerCamelCase )
return root, right
def _A ( lowerCamelCase , lowerCamelCase ):
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
a__ : List[Any] = merge(left.right , lowerCamelCase )
return left
else:
a__ : int = merge(lowerCamelCase , right.left )
return right
# NOTE(review): the two helpers below (per the REPL help text further down:
# add / erase values in the treap) are broken as obfuscated — each declares
# the parameter name ``lowerCamelCase`` twice (duplicate argument:
# SyntaxError), binds intermediate results to the throwaway local ``a__``,
# and calls ``Node``/``split``/``merge``/``value``, none of which exist under
# those names in this file (every definition here was renamed ``_A``).
# Left byte-for-byte pending a coordinated rename across the module.
def _A ( lowerCamelCase , lowerCamelCase ):
    # Intended: insert a value — split at the value, merge the new node in.
    a__ : Any = Node(lowerCamelCase )
    a__ , a__ : List[str] = split(lowerCamelCase , lowerCamelCase )
    return merge(merge(lowerCamelCase , lowerCamelCase ) , lowerCamelCase )
def _A ( lowerCamelCase , lowerCamelCase ):
    # Intended: erase all nodes with the value — split it out, merge the rest.
    a__ , a__ : Optional[int] = split(lowerCamelCase , value - 1 )
    a__ , a__ : Tuple = split(lowerCamelCase , lowerCamelCase )
    return merge(lowerCamelCase , lowerCamelCase )
def _A ( lowerCamelCase ):
if not root: # None
return
else:
inorder(root.left )
print(root.value , end="," )
inorder(root.right )
# NOTE(review): the REPL helpers below are broken as obfuscated — the bodies
# read the undefined names ``args``, ``root``, ``insert``, ``erase``,
# ``interact_treap``, ``lowerCamelCase`` (in the zero-arg function) and
# ``main`` (every definition in this file was renamed ``_A``), and results are
# bound to the throwaway local ``a__``. Left byte-for-byte pending a
# coordinated rename across the module.
def _A ( lowerCamelCase , lowerCamelCase ):
    # Intended: apply a whitespace-separated command string to the treap:
    # "+N" inserts N, "-N" erases all nodes with value N.
    for arg in args.split():
        if arg[0] == "+":
            a__ : int = insert(lowerCamelCase , int(arg[1:] ) )
        elif arg[0] == "-":
            a__ : Union[str, Any] = erase(lowerCamelCase , int(arg[1:] ) )
        else:
            print("Unknown command" )
    return root
def _A ( ):
    # Intended: interactive loop reading command strings until 'q' is entered.
    a__ : List[str] = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. " )
    a__ : int = input()
    while args != "q":
        a__ : Union[str, Any] = interact_treap(lowerCamelCase , lowerCamelCase )
        print(lowerCamelCase )
        a__ : Optional[Any] = input()
    print("good by!" )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
| 629 | 1 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
# Tiny checkpoints used as teacher models by the tests below.
SCREAMING_SNAKE_CASE__ : str = """sshleifer/bart-tiny-random"""
SCREAMING_SNAKE_CASE__ : str = """patrickvonplaten/t5-tiny-random"""
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
    """Tests for ``create_student_by_copying_alternating_layers``.

    NOTE(review): broken as obfuscated — every method is named ``_snake_case``
    (so only the last binding survives on the class and unittest discovers no
    tests), the bodies read the undefined names ``snake_case`` and ``student``
    (results were bound to the throwaway local ``a__``), and
    ``self.teacher_config`` is referenced although no attribute of that name
    is defined here. Code left byte-for-byte; only comments, docstrings and
    annotations were changed (the original ``List``/``Any`` annotations would
    raise NameError, as ``typing`` is not imported in this file).
    """

    @cached_property
    def _snake_case ( self ) -> "AutoConfig":
        """Teacher configuration for the copied-layer student (cached)."""
        return AutoConfig.from_pretrained(snake_case )
    def _snake_case ( self ) -> None:
        """A student built with e=1/d=1 should have a single hidden layer."""
        a__ , *a__ : int = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.num_hidden_layers , 1 )
    def _snake_case ( self ) -> None:
        """Smoke test: student creation with an unspecified decoder depth."""
        a__ , *a__ : int = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=snake_case )
    def _snake_case ( self ) -> None:
        """With d unset, the decoder depth should default to the teacher's."""
        a__ , *a__ : Any = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=snake_case )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
    def _snake_case ( self ) -> None:
        """Explicit e=1/d=1 should shrink both encoder and decoder to one layer."""
        a__ , *a__ : List[str] = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , 1 )
    def _snake_case ( self ) -> None:
        """Leaving both e and d unset must raise."""
        with self.assertRaises(snake_case ):
            create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=snake_case , d=snake_case )
| 629 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
# Make the diffusers test run deterministic before any pipeline is built.
enable_full_determinism()
class __lowerCAmelCase ( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
    """Fast tests for ``StableUnCLIPPipeline`` built from tiny dummy components.

    NOTE(review): broken as obfuscated —
      * the three mixin bases and all five class attributes share the single
        name ``_UpperCamelCase`` (the base is undefined in this file, and each
        attribute assignment rebinds the previous one);
      * the component builder reads the undefined names
        ``embedder_hidden_size``/``embedder_projection_dim``/``snake_case``
        and returns a dict built from names (``prior_tokenizer`` …) whose
        values were actually bound to the throwaway local ``a__``;
      * typing names used in the annotations (``Optional``, ``Any``, ``str``
        aside) are not imported here, so the annotated class attributes would
        raise NameError at class-creation time.
    Code left byte-for-byte; only comments/docstrings/annotations changed.
    """

    _UpperCamelCase : Optional[int] = StableUnCLIPPipeline
    _UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_PARAMS
    _UpperCamelCase : str = TEXT_TO_IMAGE_BATCH_PARAMS
    _UpperCamelCase : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
    _UpperCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    _UpperCamelCase : Any = False
    def _snake_case ( self ) -> dict:
        """Build tiny prior, image-noising and denoising components."""
        a__ : Any = 32
        a__ : int = embedder_hidden_size
        # prior components
        torch.manual_seed(0 )
        a__ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        torch.manual_seed(0 )
        a__ : Optional[Any] = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case , projection_dim=snake_case , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
        torch.manual_seed(0 )
        a__ : int = PriorTransformer(
            num_attention_heads=2 , attention_head_dim=12 , embedding_dim=snake_case , num_layers=1 , )
        torch.manual_seed(0 )
        a__ : str = DDPMScheduler(
            variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_000 , clip_sample=snake_case , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
        # regular denoising components
        torch.manual_seed(0 )
        a__ : Any = StableUnCLIPImageNormalizer(embedding_dim=snake_case )
        a__ : int = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
        torch.manual_seed(0 )
        a__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        torch.manual_seed(0 )
        a__ : Union[str, Any] = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
        torch.manual_seed(0 )
        a__ : Any = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=snake_case , layers_per_block=1 , upcast_attention=snake_case , use_linear_projection=snake_case , )
        torch.manual_seed(0 )
        a__ : Tuple = DDIMScheduler(
            beta_schedule="scaled_linear" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=snake_case , steps_offset=1 , )
        torch.manual_seed(0 )
        a__ : Optional[int] = AutoencoderKL()
        a__ : Any = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
        return components
    def _snake_case ( self , snake_case , snake_case=0 ) -> dict:
        """Seeded dummy pipeline inputs (MPS needs a CPU-side generator)."""
        if str(snake_case ).startswith("mps" ):
            a__ : Union[str, Any] = torch.manual_seed(snake_case )
        else:
            a__ : List[str] = torch.Generator(device=snake_case ).manual_seed(snake_case )
        a__ : Any = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def _snake_case ( self ) -> None:
        """Attention slicing must not change results (strict on CPU only)."""
        a__ : Dict = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=snake_case )
    def _snake_case ( self ) -> None:
        """Batched vs. single inference must agree (strict on cpu/mps only)."""
        a__ : int = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=snake_case )
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
    """Slow GPU integration tests for ``StableUnCLIPPipeline``.

    NOTE(review): broken as obfuscated — the first method (which calls
    ``super().tearDown()``, i.e. upstream's ``tearDown``) is named
    ``_snake_case`` like the others, so unittest never invokes it; the bodies
    read ``pipe``/``output``/``image``/``mem_bytes``, which were bound to the
    throwaway local ``a__``; ``snake_case`` is passed where a device, flag or
    expected image was intended; and ``torch.floataa`` looks digit-mangled
    (presumably ``torch.float16``). Code left byte-for-byte; only
    comments/docstrings/annotations changed.
    """

    def _snake_case ( self ) -> None:
        """Upstream ``tearDown``: release GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def _snake_case ( self ) -> None:
        """Full pipeline output vs. the stored reference image."""
        a__ : int = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
        a__ : int = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
        pipe.to(snake_case )
        pipe.set_progress_bar_config(disable=snake_case )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        a__ : Dict = torch.Generator(device="cpu" ).manual_seed(0 )
        a__ : Dict = pipe("anime turle" , generator=snake_case , output_type="np" )
        a__ : List[str] = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(snake_case , snake_case )
    def _snake_case ( self ) -> None:
        """Offloaded pipeline must stay under ~7 GB of peak GPU memory."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        a__ : str = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
        a__ : Union[str, Any] = pipe.to(snake_case )
        pipe.set_progress_bar_config(disable=snake_case )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        a__ : Union[str, Any] = pipe(
            "anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
        a__ : List[Any] = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 629 | 1 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__)
class __lowerCAmelCase(BeitImageProcessor):
    """Deprecated alias kept for backward compatibility.

    Construction emits a ``FutureWarning`` and otherwise defers entirely to
    ``BeitImageProcessor``.

    NOTE(review): the original declared ``*snake_case, **snake_case``
    (duplicate argument name — a SyntaxError), passed the undefined
    ``snake_case`` as the warning category, and inherited from the undefined
    ``_UpperCamelCase``; the base class was restored to the
    ``BeitImageProcessor`` imported above and the category to
    ``FutureWarning``, per the deprecation message text.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 629 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ : str = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Tuple = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[Any] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Any = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 629 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ : Dict = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : str = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Tuple = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Any = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 629 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def _A ( lowerCamelCase ):
# A local function to see if a dot lands in the circle.
def is_in_circle(lowerCamelCase , lowerCamelCase ) -> bool:
a__ : Any = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
a__ : Union[str, Any] = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(lowerCamelCase ) )
# The ratio of the area for circle to square is pi/4.
a__ : Any = proportion * 4
print(F"""The estimated value of pi is {pi_estimate}""" )
print(F"""The numpy value of pi is {pi}""" )
print(F"""The total error is {abs(pi - pi_estimate )}""" )
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase = 0.0 , lowerCamelCase = 1.0 , ):
return mean(
function_to_integrate(uniform(lowerCamelCase , lowerCamelCase ) ) for _ in range(lowerCamelCase ) ) * (max_value - min_value)
# NOTE(review): the two demo functions below are broken as obfuscated — both
# call ``area_under_curve_estimator``, which is not defined under that name in
# this file (every definition was renamed ``_A``); the first duplicates the
# parameter name ``lowerCamelCase`` (SyntaxError) and its inner
# ``identity_function`` reads the undefined ``x``; and results are bound to
# the throwaway local ``a__`` but read back as ``estimated_value``. Left
# byte-for-byte pending a coordinated rename across the module.
def _A ( lowerCamelCase , lowerCamelCase = 0.0 , lowerCamelCase = 1.0 ):
    # Intended: check the estimator on y = x against the closed-form area.
    def identity_function(lowerCamelCase ) -> float:
        return x
    a__ : int = area_under_curve_estimator(
        lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
    a__ : int = (max_value * max_value - min_value * min_value) / 2
    print("******************" )
    print(F"""Estimating area under y=x where x varies from {min_value} to {max_value}""" )
    print(F"""Estimated value is {estimated_value}""" )
    print(F"""Expected value is {expected_value}""" )
    print(F"""Total error is {abs(estimated_value - expected_value )}""" )
    print("******************" )
def _A ( lowerCamelCase ):
    # Intended: estimate pi as the area under sqrt(4 - x^2) on [0, 2].
    def function_to_integrate(lowerCamelCase ) -> float:
        return sqrt(4.0 - x * x )
    a__ : int = area_under_curve_estimator(
        lowerCamelCase , lowerCamelCase , 0.0 , 2.0 )
    print("******************" )
    print("Estimating pi using area_under_curve_estimator" )
    print(F"""Estimated value is {estimated_value}""" )
    print(F"""Expected value is {pi}""" )
    print(F"""Total error is {abs(estimated_value - pi )}""" )
    print("******************" )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 629 | 1 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowerCAmelCase :
    """Builder of tiny ``ViTMAEConfig``s and dummy inputs for the TF tests.

    NOTE(review): broken as obfuscated —
      * ``__init__`` and both ``create_and_check`` methods repeat the
        parameter name ``snake_case`` (duplicate argument: SyntaxError);
      * ``__init__`` binds every field to the throwaway local ``a__`` instead
        of ``self``, so later reads of ``self.batch_size`` etc. would fail;
      * the bodies read undefined names (``parent``, ``batch_size`` …,
        ``model``, ``result``, ``config_and_inputs``, ``pixel_values``,
        ``snake_case`` in ``get_config``);
      * the parenthesized annotated tuple assignment near the end is itself
        invalid syntax (annotated assignments cannot have tuple targets);
      * all four plain methods share the name ``_snake_case``.
    Code left byte-for-byte; only comments/docstrings added (annotations are
    lazy here thanks to ``from __future__ import annotations``).
    """

    def __init__( self , snake_case , snake_case=13 , snake_case=30 , snake_case=2 , snake_case=3 , snake_case=True , snake_case=True , snake_case=32 , snake_case=2 , snake_case=4 , snake_case=37 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=10 , snake_case=0.02 , snake_case=3 , snake_case=0.6 , snake_case=None , ) -> List[str]:
        """Store the tester's hyperparameters (intended on ``self``)."""
        a__ : Tuple = parent
        a__ : Any = batch_size
        a__ : List[Any] = image_size
        a__ : Optional[int] = patch_size
        a__ : Dict = num_channels
        a__ : Optional[int] = is_training
        a__ : List[str] = use_labels
        a__ : int = hidden_size
        a__ : Optional[int] = num_hidden_layers
        a__ : Optional[int] = num_attention_heads
        a__ : List[str] = intermediate_size
        a__ : Tuple = hidden_act
        a__ : Tuple = hidden_dropout_prob
        a__ : Tuple = attention_probs_dropout_prob
        a__ : int = type_sequence_label_size
        a__ : int = initializer_range
        a__ : Union[str, Any] = mask_ratio
        a__ : Optional[Any] = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        a__ : Tuple = (image_size // patch_size) ** 2
        a__ : List[str] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
    def _snake_case ( self ) -> int:
        """Build random pixel values (and labels) plus a config."""
        a__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        a__ : str = None
        if self.use_labels:
            a__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        a__ : Dict = self.get_config()
        return config, pixel_values, labels
    def _snake_case ( self ) -> int:
        """Assemble a tiny ``ViTMAEConfig`` from the stored hyperparameters."""
        return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
    def _snake_case ( self , snake_case , snake_case , snake_case ) -> Dict:
        """Check the base model's output shape on the dummy inputs."""
        a__ : List[str] = TFViTMAEModel(config=snake_case )
        a__ : int = model(snake_case , training=snake_case )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def _snake_case ( self , snake_case , snake_case , snake_case ) -> Dict:
        """Check the pretraining head's logits shape (RGB and greyscale)."""
        a__ : Optional[int] = TFViTMAEForPreTraining(snake_case )
        a__ : Dict = model(snake_case , training=snake_case )
        # expected sequence length = num_patches
        a__ : Union[str, Any] = (self.image_size // self.patch_size) ** 2
        a__ : Any = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        a__ : Any = 1
        a__ : Any = TFViTMAEForPreTraining(snake_case )
        a__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        a__ : Dict = model(snake_case , training=snake_case )
        a__ : Dict = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    def _snake_case ( self ) -> Optional[int]:
        """Repackage the prepared config and inputs for the common tests."""
        a__ : Any = self.prepare_config_and_inputs()
        ((a__) , (a__) , (a__)) : Dict = config_and_inputs
        a__ : List[str] = {"pixel_values": pixel_values}
        return config, inputs_dict
# Test-suite class for TF ViT-MAE (base model + pretraining head), mixing in the
# shared model/pipeline test harnesses.
@require_tf
class __lowerCAmelCase ( _UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
    # NOTE(review): this file was machine-obfuscated — results are bound to a
    # throwaway `a__` name while later statements read the original variable names
    # (`model`, `outputs`, `noise`, ...), and `snake_case` stands where arguments
    # or literals once were.  The code below documents intent; it is not runnable
    # as written.  Comments only are added here; no tokens were changed.
    _UpperCamelCase : List[Any] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    _UpperCamelCase : Optional[int] = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
    _UpperCamelCase : str = False
    _UpperCamelCase : Tuple = False
    _UpperCamelCase : Optional[Any] = False
    _UpperCamelCase : List[Any] = False
    # setUp: builds the model tester and a ConfigTester for ViTMAEConfig.
    def _snake_case ( self ) -> Optional[int]:
        """simple docstring"""
        a__ : List[Any] = TFViTMAEModelTester(self )
        a__ : Optional[Any] = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 )
    # Runs the shared config sanity tests.
    def _snake_case ( self ) -> Optional[Any]:
        """simple docstring"""
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViTMAE does not use inputs_embeds" )
    def _snake_case ( self ) -> str:
        """simple docstring"""
        pass
    # Checks input embeddings are a Keras layer and output embeddings are absent.
    def _snake_case ( self ) -> Optional[Any]:
        """simple docstring"""
        a__ , a__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a__ : int = model_class(snake_case )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            a__ : Any = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(snake_case , tf.keras.layers.Layer ) )
    # Checks the call signature starts with `pixel_values`.
    def _snake_case ( self ) -> Optional[Any]:
        """simple docstring"""
        a__ , a__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a__ : Union[str, Any] = model_class(snake_case )
            a__ : str = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            a__ : Optional[int] = [*signature.parameters.keys()]
            a__ : Any = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , snake_case )
    def _snake_case ( self ) -> str:
        """simple docstring"""
        a__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case )
    def _snake_case ( self ) -> int:
        """simple docstring"""
        a__ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*snake_case )
    # Positional vs keyword invocation must yield the same outputs (fixed noise,
    # since ViTMAE samples a random mask otherwise).
    def _snake_case ( self ) -> str:
        """simple docstring"""
        np.random.seed(2 )
        a__ , a__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        a__ : List[Any] = int((config.image_size // config.patch_size) ** 2 )
        a__ : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            a__ : Optional[Any] = model_class(snake_case )
            a__ : Any = self._prepare_for_class(snake_case , snake_case )
            a__ : Union[str, Any] = model(snake_case , noise=snake_case )
            a__ : str = copy.deepcopy(self._prepare_for_class(snake_case , snake_case ) )
            a__ : str = model(**snake_case , noise=snake_case )
            a__ : List[str] = outputs_dict[0].numpy()
            a__ : int = outputs_keywords[0].numpy()
            self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
    # NumPy inputs must behave the same as TF tensors.
    def _snake_case ( self ) -> Any:
        """simple docstring"""
        np.random.seed(2 )
        a__ , a__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        a__ : Any = int((config.image_size // config.patch_size) ** 2 )
        a__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        def prepare_numpy_arrays(snake_case ):
            a__ : int = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(snake_case ):
                    a__ : Optional[int] = v.numpy()
                else:
                    a__ : str = np.array(snake_case )
            return inputs_np_dict
        for model_class in self.all_model_classes:
            a__ : Tuple = model_class(snake_case )
            a__ : Dict = self._prepare_for_class(snake_case , snake_case )
            a__ : Tuple = prepare_numpy_arrays(snake_case )
            a__ : str = model(snake_case , noise=snake_case )
            a__ : Tuple = model(**snake_case , noise=snake_case )
            self.assert_outputs_same(snake_case , snake_case )
    # PT/TF equivalence hook: injects a fixed `noise` input before delegating.
    def _snake_case ( self , snake_case , snake_case , snake_case ) -> int:
        """simple docstring"""
        np.random.seed(2 )
        a__ : Optional[Any] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
        a__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        a__ : List[str] = tf.constant(snake_case )
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        a__ : str = tf_noise
        super().check_pt_tf_models(snake_case , snake_case , snake_case )
    # Keras-serializable MainLayer classes must round-trip through an .h5 save.
    def _snake_case ( self ) -> Any:
        """simple docstring"""
        np.random.seed(2 )
        a__ , a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        a__ : Dict = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__ ),)
            for module_member_name in dir(snake_case )
            if module_member_name.endswith("MainLayer" )
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer" )] == model_class.__name__[: -len("Model" )]
            for module_member in (getattr(snake_case , snake_case ),)
            if isinstance(snake_case , snake_case )
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(snake_case , "_keras_serializable" , snake_case )
        }
        a__ : Dict = int((config.image_size // config.patch_size) ** 2 )
        a__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        a__ : Tuple = tf.convert_to_tensor(snake_case )
        inputs_dict.update({"noise": noise} )
        for main_layer_class in tf_main_layer_classes:
            a__ : str = main_layer_class(snake_case )
            a__ : Union[str, Any] = {
                name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
            }
            a__ : Union[str, Any] = tf.keras.Model(snake_case , outputs=main_layer(snake_case ) )
            a__ : Optional[int] = model(snake_case )
            with tempfile.TemporaryDirectory() as tmpdirname:
                a__ : Dict = os.path.join(snake_case , "keras_model.h5" )
                model.save(snake_case )
                a__ : Optional[Any] = tf.keras.models.load_model(
                    snake_case , custom_objects={main_layer_class.__name__: main_layer_class} )
                assert isinstance(snake_case , tf.keras.Model )
                a__ : Any = model(snake_case )
                self.assert_outputs_same(snake_case , snake_case )
    # save_pretrained/from_pretrained round-trip with fixed noise (deterministic).
    @slow
    def _snake_case ( self ) -> str:
        """simple docstring"""
        np.random.seed(2 )
        a__ , a__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        a__ : int = int((config.image_size // config.patch_size) ** 2 )
        a__ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            a__ : Union[str, Any] = model_class(snake_case )
            a__ : str = self._prepare_for_class(snake_case , snake_case )
            a__ : List[Any] = model(snake_case , noise=snake_case )
            if model_class.__name__ == "TFViTMAEModel":
                a__ : Tuple = outputs.last_hidden_state.numpy()
                a__ : Any = 0
            else:
                a__ : Dict = outputs.logits.numpy()
                a__ : Union[str, Any] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(snake_case , saved_model=snake_case )
                a__ : List[str] = model_class.from_pretrained(snake_case )
                a__ : Optional[Any] = model(snake_case , noise=snake_case )
                if model_class.__name__ == "TFViTMAEModel":
                    a__ : int = after_outputs["last_hidden_state"].numpy()
                    a__ : List[Any] = 0
                else:
                    a__ : Union[str, Any] = after_outputs["logits"].numpy()
                    a__ : Optional[int] = 0
                a__ : str = np.amax(np.abs(out_a - out_a ) )
                self.assertLessEqual(snake_case , 1E-5 )
    # get_config/from_config round-trip must preserve outputs.
    def _snake_case ( self ) -> Tuple:
        """simple docstring"""
        np.random.seed(2 )
        a__ , a__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        a__ : Tuple = int((config.image_size // config.patch_size) ** 2 )
        a__ : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            a__ : Optional[int] = model_class(snake_case )
            a__ : Tuple = self._prepare_for_class(snake_case , snake_case )
            a__ : List[str] = model(snake_case , noise=snake_case )
            a__ : str = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(snake_case )
            a__ : str = model_class.from_config(model.get_config() )
            # make sure it also accepts a normal config
            a__ : str = model_class.from_config(model.config )
            a__ : Any = new_model(snake_case ) # Build model
            new_model.set_weights(model.get_weights() )
            a__ : Union[str, Any] = new_model(snake_case , noise=snake_case )
            self.assert_outputs_same(snake_case , snake_case )
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
    def _snake_case ( self ) -> Union[str, Any]:
        """simple docstring"""
        pass
    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
    def _snake_case ( self ) -> Dict:
        """simple docstring"""
        pass
    @slow
    def _snake_case ( self ) -> Dict:
        """simple docstring"""
        a__ : str = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224" )
        self.assertIsNotNone(snake_case )
def _A ( ):
    # Load the COCO fixture image used by the integration test below.
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
# Slow integration test: runs the released facebook/vit-mae-base checkpoint on a
# fixture image and compares logits against reference values.
@require_tf
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
    # NOTE(review): obfuscation broke the bindings below — values are assigned to
    # `a__` while later lines read `model`, `inputs`, `vit_mae_config`, `outputs`.
    # Comments only are added here; no tokens were changed.
    @cached_property
    def _snake_case ( self ) -> List[str]:
        """simple docstring"""
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
    @slow
    def _snake_case ( self ) -> Optional[int]:
        """simple docstring"""
        # Seed numpy so the noise vector (and thus the sampled mask) is reproducible.
        np.random.seed(2 )
        a__ : Optional[int] = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" )
        a__ : List[Any] = self.default_image_processor
        a__ : int = prepare_img()
        a__ : int = image_processor(images=snake_case , return_tensors="tf" )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        a__ : Dict = ViTMAEConfig()
        a__ : Union[str, Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        a__ : Any = np.random.uniform(size=(1, num_patches) )
        # forward pass
        a__ : List[str] = model(**snake_case , noise=snake_case )
        # verify the logits
        a__ : Any = tf.convert_to_tensor([1, 196, 768] )
        self.assertEqual(outputs.logits.shape , snake_case )
        a__ : Union[str, Any] = tf.convert_to_tensor(
            [[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
        tf.debugging.assert_near(outputs.logits[0, :3, :3] , snake_case , atol=1E-4 )
| 629 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _A ( lowerCamelCase , lowerCamelCase ):
a__ : Dict = old_name
if "patch_embed" in old_name:
a__ , a__ , a__ : Union[str, Any] = old_name.split("." )
if layer == "0":
a__ : Union[str, Any] = old_name.replace("0" , "convolution1" )
elif layer == "1":
a__ : Dict = old_name.replace("1" , "batchnorm_before" )
elif layer == "3":
a__ : List[str] = old_name.replace("3" , "convolution2" )
else:
a__ : Optional[Any] = old_name.replace("4" , "batchnorm_after" )
if "network" in old_name and re.search(r"\d\.\d" , lowerCamelCase ):
a__ : List[str] = r"\b\d{2}\b"
if bool(re.search(lowerCamelCase , lowerCamelCase ) ):
a__ : Optional[int] = re.search(r"\d\.\d\d." , lowerCamelCase ).group()
else:
a__ : Any = re.search(r"\d\.\d." , lowerCamelCase ).group()
if int(match[0] ) < 6:
a__ : List[Any] = old_name.replace(lowerCamelCase , "" )
a__ : int = trimmed_name.replace("network" , match[0] + ".meta4D_layers.blocks." + match[2:-1] )
a__ : List[Any] = "intermediate_stages." + trimmed_name
else:
a__ : Union[str, Any] = old_name.replace(lowerCamelCase , "" )
if int(match[2] ) < num_meta4D_last_stage:
a__ : Optional[Any] = trimmed_name.replace("network" , "meta4D_layers.blocks." + match[2] )
else:
a__ : Union[str, Any] = str(int(match[2] ) - num_meta4D_last_stage )
a__ : str = trimmed_name.replace("network" , "meta3D_layers.blocks." + layer_index )
if "norm1" in old_name:
a__ : List[str] = trimmed_name.replace("norm1" , "layernorm1" )
elif "norm2" in old_name:
a__ : Optional[int] = trimmed_name.replace("norm2" , "layernorm2" )
elif "fc1" in old_name:
a__ : List[str] = trimmed_name.replace("fc1" , "linear_in" )
elif "fc2" in old_name:
a__ : Any = trimmed_name.replace("fc2" , "linear_out" )
a__ : Any = "last_stage." + trimmed_name
elif "network" in old_name and re.search(r".\d." , lowerCamelCase ):
a__ : List[str] = old_name.replace("network" , "intermediate_stages" )
if "fc" in new_name:
a__ : str = new_name.replace("fc" , "convolution" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
a__ : str = new_name.replace("norm1" , "batchnorm_before" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
a__ : Any = new_name.replace("norm2" , "batchnorm_after" )
if "proj" in new_name:
a__ : Optional[int] = new_name.replace("proj" , "projection" )
if "dist_head" in new_name:
a__ : Tuple = new_name.replace("dist_head" , "distillation_classifier" )
elif "head" in new_name:
a__ : Optional[int] = new_name.replace("head" , "classifier" )
elif "patch_embed" in new_name:
a__ : Tuple = "efficientformer." + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
a__ : Union[str, Any] = new_name.replace("norm" , "layernorm" )
a__ : Optional[int] = "efficientformer." + new_name
else:
a__ : List[Any] = "efficientformer.encoder." + new_name
return new_name
def _A ( checkpoint , num_meta4D_last_stage ):
    """Rename every key of *checkpoint* to the HF naming scheme (mutates in place).

    NOTE(review): deobfuscated — the original signature repeated the parameter
    name ``lowerCamelCase`` (a SyntaxError), popped the wrong object, and dropped
    the rename entirely.  Upstream re-inserts each value under
    ``rename_key(key, num_meta4D_last_stage)``; that helper was also obfuscated
    to ``_A`` in this file, so the name below must still be restored — TODO
    confirm the helper's final name when deobfuscating the whole module.
    """
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key )
        checkpoint[rename_key(key , num_meta4D_last_stage )] = val
    return checkpoint
def _A ( ):
    """Download the standard COCO validation image used to sanity-check conversion.

    NOTE(review): the obfuscated source referenced ``lowerCamelCase`` inside this
    no-argument function (NameError); restored to the URL local and
    ``stream=True`` so the raw response can be handed to PIL.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
def _A ( checkpoint_path , efficientformer_config_file , pytorch_dump_path , push_to_hub ):
    """Convert an original EfficientFormer checkpoint to HF format, verify it
    against hard-coded reference logits, save it, and optionally push to the hub.

    Args:
        checkpoint_path: path to the original ``.pth`` checkpoint.
        efficientformer_config_file: JSON config for the target model.
        pytorch_dump_path: output directory for model + processor.
        push_to_hub: whether to upload the converted artifacts.

    NOTE(review): deobfuscated — the original signature repeated
    ``lowerCamelCase`` four times (SyntaxError) and bound every value to ``a__``
    while later statements read ``model``/``config``/``processor``/``logits``.
    Parameter names match the keyword arguments used by the ``__main__`` block.
    """
    orig_state_dict = torch.load(checkpoint_path , map_location="cpu" )["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file )
    model = EfficientFormerForImageClassificationWithTeacher(config )
    model_name = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
    # NOTE(review): the obfuscated source read `config.num_metaad_blocks`; the
    # EfficientFormerConfig attribute is `num_meta3d_blocks`.
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict , num_meta4D_last_stage )
    model.load_state_dict(new_state_dict )
    model.eval()
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size} , crop_size={"height": crop_size, "width": crop_size} , resample=pillow_resamplings["bicubic"] , )
    pixel_values = processor(images=image , return_tensors="pt" ).pixel_values
    # original processing pipeline — must match the HF processor exactly
    image_transforms = Compose(
        [
            Resize(image_size , interpolation=pillow_resamplings["bicubic"] ),
            CenterCrop(crop_size ),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN , IMAGENET_DEFAULT_STD ),
        ] )
    original_pixel_values = image_transforms(image ).unsqueeze(0 )
    assert torch.allclose(original_pixel_values , pixel_values )
    outputs = model(pixel_values )
    logits = outputs.logits
    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
        assert torch.allclose(logits[0, :10] , expected_logits , atol=1E-3 )
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
        assert torch.allclose(logits[0, :10] , expected_logits , atol=1E-3 )
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
    # Save Checkpoints
    Path(pytorch_dump_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_path )
    print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
    processor.save_pretrained(pytorch_dump_path )
    print(F"""Processor successfuly saved at {pytorch_dump_path}""" )
    if push_to_hub:
        print("Pushing model to the hub..." )
        model.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add model" , use_temp_dir=True , )
        processor.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add image processor" , use_temp_dir=True , )
# Script entry point: parse CLI arguments and run the conversion.
if __name__ == "__main__":
    # NOTE(review): the ArgumentParser and parse_args() results are bound to
    # SCREAMING_SNAKE_CASE__, yet the code below reads `parser` and `args`;
    # `convert_efficientformer_checkpoint` is also never defined under that name
    # in this file (every def was obfuscated to `_A`).  Flagged, not fixed, to
    # keep this block's code byte-identical.
    SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--pytorch_model_path""",
        default=None,
        type=str,
        required=True,
        help="""Path to EfficientFormer pytorch checkpoint.""",
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The json file for EfficientFormer model config.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    parser.add_argument(
        """--no-push_to_hub""",
        dest="""push_to_hub""",
        action="""store_false""",
        help="""Do not push model and image processor to the hub""",
    )
    # Default is to push; --no-push_to_hub opts out.
    parser.set_defaults(push_to_hub=True)
    SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
| 629 | 1 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
# Helper that builds tiny MegatronBert configs/inputs and runs shape checks for
# every task head; driven by the test-case class below.
class __lowerCAmelCase :
    # NOTE(review): this file was machine-obfuscated — every assignment target
    # became a throwaway `a__` and most arguments/literals became `snake_case`,
    # while later statements still read the original names (`model`, `result`,
    # `config_and_inputs`, ...).  The duplicated `snake_case` parameters in the
    # signatures are a SyntaxError.  Comments only are added; no tokens changed.
    def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=99 , snake_case=64 , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=37 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=16 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ) -> Dict:
        """simple docstring"""
        a__ : List[Any] = parent
        a__ : Dict = batch_size
        a__ : Union[str, Any] = seq_length
        a__ : Dict = is_training
        a__ : Optional[Any] = use_input_mask
        a__ : str = use_token_type_ids
        a__ : Optional[int] = use_labels
        a__ : List[str] = vocab_size
        a__ : Dict = hidden_size
        a__ : Optional[int] = embedding_size
        a__ : Optional[int] = num_hidden_layers
        a__ : Optional[int] = num_attention_heads
        a__ : Dict = intermediate_size
        a__ : Any = hidden_act
        a__ : Optional[int] = hidden_dropout_prob
        a__ : Dict = attention_probs_dropout_prob
        a__ : List[Any] = max_position_embeddings
        a__ : Optional[int] = type_vocab_size
        a__ : Optional[Any] = type_sequence_label_size
        a__ : Dict = initializer_range
        a__ : Union[str, Any] = num_labels
        a__ : Optional[int] = num_choices
        a__ : Optional[Any] = scope
    # Builds random ids/masks/labels plus a config.
    def _snake_case ( self ) -> int:
        """simple docstring"""
        a__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        a__ : List[Any] = None
        if self.use_input_mask:
            a__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
        a__ : List[str] = None
        if self.use_token_type_ids:
            a__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        a__ : str = None
        a__ : Any = None
        a__ : List[str] = None
        if self.use_labels:
            a__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            a__ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            a__ : Any = ids_tensor([self.batch_size] , self.num_choices )
        a__ : Optional[Any] = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def _snake_case ( self ) -> Any:
        """simple docstring"""
        return MegatronBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , )
    # Base model: checks last_hidden_state and pooler_output shapes.
    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Union[str, Any]:
        """simple docstring"""
        a__ : Optional[int] = MegatronBertModel(config=snake_case )
        model.to(snake_case )
        model.eval()
        a__ : List[str] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
        a__ : List[Any] = model(snake_case , token_type_ids=snake_case )
        a__ : List[Any] = model(snake_case )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    # Masked-LM head.
    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> List[str]:
        """simple docstring"""
        a__ : str = MegatronBertForMaskedLM(config=snake_case )
        model.to(snake_case )
        model.eval()
        a__ : List[str] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    # Causal-LM head.
    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Any:
        """simple docstring"""
        a__ : str = MegatronBertForCausalLM(config=snake_case )
        model.to(snake_case )
        model.eval()
        a__ : List[Any] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    # Next-sentence-prediction head (binary logits).
    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Optional[int]:
        """simple docstring"""
        a__ : Union[str, Any] = MegatronBertForNextSentencePrediction(config=snake_case )
        model.to(snake_case )
        model.eval()
        a__ : Any = model(
            snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    # Pretraining heads: MLM prediction logits + seq-relationship logits.
    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Any:
        """simple docstring"""
        a__ : Dict = MegatronBertForPreTraining(config=snake_case )
        model.to(snake_case )
        model.eval()
        a__ : Dict = model(
            snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , next_sentence_label=snake_case , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    # Extractive QA head: start/end logits.
    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Dict:
        """simple docstring"""
        a__ : Dict = MegatronBertForQuestionAnswering(config=snake_case )
        model.to(snake_case )
        model.eval()
        a__ : Tuple = model(
            snake_case , attention_mask=snake_case , token_type_ids=snake_case , start_positions=snake_case , end_positions=snake_case , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    # Sequence-classification head.
    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Optional[int]:
        """simple docstring"""
        a__ : List[str] = self.num_labels
        a__ : Dict = MegatronBertForSequenceClassification(snake_case )
        model.to(snake_case )
        model.eval()
        a__ : Union[str, Any] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    # Token-classification head.
    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Optional[Any]:
        """simple docstring"""
        a__ : List[str] = self.num_labels
        a__ : Union[str, Any] = MegatronBertForTokenClassification(config=snake_case )
        model.to(snake_case )
        model.eval()
        a__ : List[str] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    # Multiple-choice head: inputs are expanded along a num_choices axis.
    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> List[str]:
        """simple docstring"""
        a__ : Dict = self.num_choices
        a__ : str = MegatronBertForMultipleChoice(config=snake_case )
        model.to(snake_case )
        model.eval()
        a__ : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        a__ : str = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        a__ : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        a__ : str = model(
            snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    # Repackage prepare_config_and_inputs() output for the common-test mixin.
    def _snake_case ( self ) -> int:
        """simple docstring"""
        a__ : Tuple = self.prepare_config_and_inputs()
        (
            (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) ,
        ) : str = config_and_inputs
        a__ : Optional[int] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
# Test-case class wiring the MegatronBert tester into the shared model/pipeline
# test harnesses.
@require_torch
class __lowerCAmelCase ( _UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
    # NOTE(review): obfuscation broke bindings below (`a__` targets, `snake_case`
    # arguments); comments only are added, no tokens changed.
    _UpperCamelCase : Tuple = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    _UpperCamelCase : List[str] = (
        {
            """feature-extraction""": MegatronBertModel,
            """fill-mask""": MegatronBertForMaskedLM,
            """question-answering""": MegatronBertForQuestionAnswering,
            """text-classification""": MegatronBertForSequenceClassification,
            """text-generation""": MegatronBertForCausalLM,
            """token-classification""": MegatronBertForTokenClassification,
            """zero-shot""": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _UpperCamelCase : int = True
    # test_resize_embeddings = False
    _UpperCamelCase : Tuple = False
    # Adds zero labels for pretraining-mapped model classes when requested.
    def _snake_case ( self , snake_case , snake_case , snake_case=False ) -> str:
        """simple docstring"""
        a__ : Optional[int] = super()._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
        if return_labels:
            if model_class in get_values(snake_case ):
                a__ : Union[str, Any] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case )
                a__ : Dict = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=snake_case )
        return inputs_dict
    # setUp: model tester + config tester.
    def _snake_case ( self ) -> Optional[Any]:
        """simple docstring"""
        a__ : Dict = MegatronBertModelTester(self )
        a__ : List[Any] = ConfigTester(self , config_class=snake_case , hidden_size=37 )
    def _snake_case ( self ) -> Any:
        """simple docstring"""
        self.config_tester.run_common_tests()
    def _snake_case ( self ) -> int:
        """simple docstring"""
        a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*snake_case )
    def _snake_case ( self ) -> str:
        """simple docstring"""
        a__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*snake_case )
    def _snake_case ( self ) -> Tuple:
        """simple docstring"""
        a__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*snake_case )
    def _snake_case ( self ) -> Optional[Any]:
        """simple docstring"""
        a__ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*snake_case )
    def _snake_case ( self ) -> Union[str, Any]:
        """simple docstring"""
        a__ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*snake_case )
    def _snake_case ( self ) -> Optional[int]:
        """simple docstring"""
        a__ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*snake_case )
    def _snake_case ( self ) -> Union[str, Any]:
        """simple docstring"""
        a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*snake_case )
    def _snake_case ( self ) -> Optional[int]:
        """simple docstring"""
        a__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*snake_case )
def _A ( lowerCamelCase ):
    """Wrap a (nested) list of token ids in a ``torch.long`` tensor on the test device.

    NOTE(review): the obfuscated source passed the token list itself as the
    ``device=`` argument; upstream uses the ``torch_device`` imported from
    ``transformers.testing_utils`` (see the import block above).
    """
    return torch.tensor(
        lowerCamelCase , dtype=torch.long , device=torch_device , )
SCREAMING_SNAKE_CASE__ : Optional[int] = 1e-4
# Slow integration test for the released nvidia/megatron-bert-uncased-345m
# checkpoint (skipped: the model is not publicly downloadable).
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
    # NOTE(review): obfuscation broke bindings below (`a__` targets while later
    # lines read `model`, `output`, `expected`); comments only are added.
    @slow
    @unittest.skip("Model is not available." )
    def _snake_case ( self ) -> Tuple:
        """simple docstring"""
        a__ : Optional[int] = "nvidia/megatron-bert-uncased-345m"
        # Allows running against a locally mirrored checkpoint via $MYDIR.
        if "MYDIR" in os.environ:
            a__ : Tuple = os.path.join(os.environ["MYDIR"] , snake_case )
        a__ : List[Any] = MegatronBertModel.from_pretrained(snake_case )
        model.to(snake_case )
        model.half()
        a__ : Optional[int] = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]] )
        with torch.no_grad():
            a__ : Tuple = model(snake_case )[0]
        a__ : Any = torch.Size((1, 9, 1_024) )
        self.assertEqual(output.shape , snake_case )
        # Compare a 3x3 corner of the hidden states against reference values.
        a__ : Tuple = [-0.6_040, -0.2_517, -0.1_025, 0.3_420, -0.6_758, -0.0_017, -0.1_089, -0.1_990, 0.5_728]
        for ii in range(3 ):
            for jj in range(3 ):
                a__ : List[Any] = output[0, ii, jj]
                a__ : int = expected[3 * ii + jj]
                a__ : str = "ii={} jj={} a={} b={}".format(snake_case , snake_case , snake_case , snake_case )
                self.assertTrue(math.isclose(snake_case , snake_case , rel_tol=snake_case , abs_tol=snake_case ) , msg=snake_case )
| 629 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__ : List[Any] = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
SCREAMING_SNAKE_CASE__ : Any = {
"""unc-nlp/lxmert-base-uncased""": 5_1_2,
}
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class __lowerCAmelCase ( PreTrainedTokenizerFast ):
    """Fast (Rust-backed) WordPiece tokenizer for LXMERT.

    NOTE(review): the mangled original collapsed all class attributes onto one
    duplicated name, named every method ``_snake_case`` and declared duplicate
    ``snake_case`` parameters (a SyntaxError).  Attribute, method and
    parameter names are restored to the ones the ``PreTrainedTokenizerFast``
    machinery and the surviving body reads require; the base class comes from
    the file's own import.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self ,
        vocab_file=None ,
        tokenizer_file=None ,
        do_lower_case=True ,
        unk_token="[UNK]" ,
        sep_token="[SEP]" ,
        pad_token="[PAD]" ,
        cls_token="[CLS]" ,
        mask_token="[MASK]" ,
        tokenize_chinese_chars=True ,
        strip_accents=None ,
        **kwargs ,
    ) -> None:
        """Create the tokenizer and re-sync the backend normalizer options."""
        super().__init__(
            vocab_file ,
            tokenizer_file=tokenizer_file ,
            do_lower_case=do_lower_case ,
            unk_token=unk_token ,
            sep_token=sep_token ,
            pad_token=pad_token ,
            cls_token=cls_token ,
            mask_token=mask_token ,
            tokenize_chinese_chars=tokenize_chinese_chars ,
            strip_accents=strip_accents ,
            **kwargs ,
        )
        # If the serialized normalizer disagrees with the requested options,
        # rebuild it with the requested lowercasing/accent/CJK settings.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ) -> List[int]:
        """Add [CLS]/[SEP] around one token-id sequence (or a joined pair)."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        """Return the 0/1 segment-id mask for one sequence or a sequence pair."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """Write the WordPiece vocabulary files into ``save_directory``."""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 629 | 1 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
    from PIL import Image
else:
    # PIL is unavailable: install a no-op stub so references still resolve.
    class __lowerCAmelCase :
        @staticmethod
        def _snake_case ( *snake_case , **snake_case ) -> int:
            """No-op placeholder used when the vision extra is not installed."""
            pass
def _A ( lowerCamelCase ):
    """Return the first 10 hex characters of the MD5 digest of an image's raw bytes.

    NOTE(review): the original called the nonexistent ``hashlib.mda`` and then
    read two undefined names (``image`` and ``m``); both are fixed here.
    """
    digest = hashlib.md5(lowerCamelCase.tobytes() )
    return digest.hexdigest()[:10]
def _A ( lowerCamelCase ):
    """Summarize a mask image as a short content hash plus its numpy shape.

    NOTE(review): the original read the undefined name ``npimg``; the array is
    now bound before use.  ``hashimage`` must be provided by the surrounding
    module (its module-level name was mangled away).
    """
    npimg = np.array(lowerCamelCase )
    return {"hash": hashimage(lowerCamelCase ), "shape": npimg.shape}
@is_pipeline_test
@require_vision
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
    """Pipeline tests for mask generation (SAM).

    NOTE(review): identifier mangling left this class referencing several
    undefined names (``image_segmenter``, ``outputs``, ``new_outupt``,
    ``snake_case``) and the two class attributes below share one duplicated
    name; left byte-identical.
    """

    # Both attributes were originally distinct model-mapping tables; the
    # duplicated name means only the second assignment survives.
    _UpperCamelCase : Dict = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    _UpperCamelCase : Dict = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    def _snake_case ( self , snake_case , snake_case , snake_case ) -> Union[str, Any]:
        """Build a MaskGenerationPipeline plus two fixture image paths.

        NOTE(review): the parameter list duplicates ``snake_case`` (a
        SyntaxError in the mangled original).
        """
        a__ : Dict = MaskGenerationPipeline(model=snake_case , image_processor=snake_case )
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def _snake_case ( self , snake_case , snake_case ) -> List[str]:
        """Placeholder for the common pipeline run-test hook."""
        pass
    @require_tf
    @unittest.skip("Image segmentation not implemented in TF" )
    def _snake_case ( self ) -> List[str]:
        """TF variant is intentionally skipped."""
        pass
    @slow
    @require_torch
    def _snake_case ( self ) -> Any:
        """Slow integration test: run SAM-huge and compare hashed masks."""
        a__ : Optional[int] = pipeline("mask-generation" , model="facebook/sam-vit-huge" )
        a__ : Dict = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg" , points_per_batch=256 )
        # Shortening by hashing
        a__ : Dict = []
        for i, o in enumerate(outputs["masks"] ):
            new_outupt += [{"mask": mask_to_test_readable(snake_case ), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(snake_case , decimals=4 ) , [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0_444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.021},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0_167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0_132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0_053},
                {"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.9_967},
                {"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.993},
                {"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.9_909},
                {"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.9_879},
                {"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.9_834},
                {"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.9_716},
                {"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.9_612},
                {"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.9_599},
                {"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.9_552},
                {"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.9_532},
                {"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.9_516},
                {"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.9_499},
                {"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.9_483},
                {"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.9_464},
                {"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.9_408},
                {"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.9_335},
                {"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.9_326},
                {"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.9_262},
                {"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.8_999},
                {"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.8_986},
                {"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.8_984},
                {"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.8_873},
                {"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.8_871}
            ] , )
        # fmt: on
    @require_torch
    @slow
    def _snake_case ( self ) -> Dict:
        """Slow integration test: SAM-huge with a 1.0 IoU threshold."""
        a__ : Any = "facebook/sam-vit-huge"
        a__ : List[Any] = pipeline("mask-generation" , model=snake_case )
        a__ : Dict = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=256 )
        # Shortening by hashing
        a__ : str = []
        for i, o in enumerate(outputs["masks"] ):
            new_outupt += [{"mask": mask_to_test_readable(snake_case ), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(snake_case , decimals=4 ) , [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0_444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_210},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0_167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0_132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0_053},
            ] , )
| 629 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__)
# Map of canonical MobileNetV2 checkpoints to their hosted config files.
SCREAMING_SNAKE_CASE__ : Any = {
    """google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
    """google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
    """google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
    """google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class __lowerCAmelCase ( PretrainedConfig ):
    """Configuration class for MobileNetV2 models.

    NOTE(review): the mangled original declared every ``__init__`` parameter
    as the duplicate name ``snake_case`` (a SyntaxError) and mangled the
    ``model_type`` attribute name.  Parameter names are restored from the
    attribute assignments in the original body; the base class comes from the
    file's own import.
    """

    model_type = "mobilenet_v2"

    def __init__(
        self ,
        num_channels=3 ,
        image_size=224 ,
        depth_multiplier=1.0 ,
        depth_divisible_by=8 ,
        min_depth=8 ,
        expand_ratio=6 ,
        output_stride=32 ,
        first_layer_is_expansion=True ,
        finegrained_output=True ,
        hidden_act="relu6" ,
        tf_padding=True ,
        classifier_dropout_prob=0.8 ,
        initializer_range=0.02 ,
        layer_norm_eps=0.001 ,
        semantic_loss_ignore_index=255 ,
        **kwargs ,
    ) -> None:
        super().__init__(**kwargs )
        # A non-positive width multiplier would produce zero-width layers.
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero." )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class __lowerCAmelCase ( OnnxConfig ):
    """ONNX export configuration for MobileNetV2.

    NOTE(review): the three properties in the mangled original all shared one
    name and shadowed each other; they are restored to the names the ONNX
    export machinery looks up (``inputs``/``outputs``/``atol_for_validation``),
    and the minimum-version attribute to ``torch_onnx_minimum_version``.
    """

    torch_onnx_minimum_version = version.parse("""1.11""" )

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """Model input axes: a single batched pixel-values tensor."""
        return OrderedDict([("pixel_values", {0: "batch"})] )

    @property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
        """Model output axes, depending on the export task."""
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})] )
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )

    @property
    def atol_for_validation( self ) -> float:
        """Absolute tolerance when validating the exported model."""
        return 1E-4
| 629 | 1 |
SCREAMING_SNAKE_CASE__ : Tuple = """Input must be a string of 8 numbers plus letter"""
SCREAMING_SNAKE_CASE__ : str = """TRWAGMYFPDXBNJZSQVHLCKE"""
def _A ( lowerCamelCase ):
if not isinstance(lowerCamelCase , lowerCamelCase ):
a__ : Dict = F"""Expected string as input, found {type(lowerCamelCase ).__name__}"""
raise TypeError(lowerCamelCase )
a__ : Tuple = spanish_id.replace("-" , "" ).upper()
if len(lowerCamelCase ) != 9:
raise ValueError(lowerCamelCase )
try:
a__ : Any = int(spanish_id_clean[0:8] )
a__ : List[Any] = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(lowerCamelCase ) from ex
if letter.isdigit():
raise ValueError(lowerCamelCase )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 629 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _A ( lowerCamelCase ):
    """Recursively collect the ``.shape`` of every tensor in a nested
    dict/list/tuple tree, in traversal order.

    NOTE(review): the mangled original tested ``isinstance(x, x)`` and read
    several undefined names; the traversal is restored and made
    self-recursive through a local helper (the module-level function name
    was mangled away).
    """
    shapes = []

    def _walk(node ) -> None:
        if isinstance(node , dict ):
            for v in node.values():
                _walk(v )
        elif isinstance(node , (list, tuple) ):
            for t in node:
                _walk(t )
        elif isinstance(node , torch.Tensor ):
            shapes.append(node.shape )
        else:
            raise ValueError("Not supported" )

    _walk(lowerCamelCase )
    return shapes
@torch.jit.ignore
def _A ( flat_idx , dims ):
    """Convert a flat row-major index into a per-dimension index tuple.

    NOTE(review): the mangled original declared both parameters with the same
    name (a SyntaxError); restored to ``flat_idx``/``dims`` per the surviving
    body reads.
    """
    idx = []
    for d in reversed(dims ):
        idx.append(flat_idx % d )
        flat_idx = flat_idx // d
    return tuple(reversed(idx ) )
@torch.jit.ignore
def _A ( start , end , dims , start_edges = None , end_edges = None , ):
    """Compute a minimal list of slice-tuples that covers exactly the
    row-major index range ``[start, end]`` (inclusive at both ends, given
    per-dimension) of a tensor whose leading dimensions are ``dims``.

    NOTE(review): the mangled original duplicated three parameter names
    (a SyntaxError) and clobbered most locals; names are restored from the
    reads that survived in the body.  Recursion goes through a local helper
    because the module-level function name was mangled away.
    """

    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree.
    def _reduce_edge_list(l ) -> None:
        # A dimension only counts as "on the edge" if every later dimension
        # is also on the edge, so AND the flags from the right.
        tally = True
        for i in range(len(l ) ):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    def _solve(start , end , dims , start_edges , end_edges ):
        if start_edges is None:
            start_edges = [s == 0 for s in start]
            _reduce_edge_list(start_edges )
        if end_edges is None:
            end_edges = [e == (d - 1) for e, d in zip(end , dims )]
            _reduce_edge_list(end_edges )
        # Base cases: empty spec, or a single directly-sliceable dimension.
        if len(start ) == 0:
            return [()]
        elif len(start ) == 1:
            return [(slice(start[0] , end[0] + 1 ),)]
        slices = []
        path_list = []
        # Dimensions common to start and end can be selected directly.
        for s, e in zip(start , end ):
            if s == e:
                path_list.append(slice(s , s + 1 ) )
            else:
                break
        path = tuple(path_list )
        divergence_idx = len(path )
        # start == end, and we're done.
        if divergence_idx == len(dims ):
            return [path]

        def upper():
            assert start_edges is not None
            assert end_edges is not None
            sdi = start[divergence_idx]
            return tuple(
                path + (slice(sdi , sdi + 1 ),) + s
                for s in _solve(
                    start[divergence_idx + 1 :] ,
                    [d - 1 for d in dims[divergence_idx + 1 :]] ,
                    dims[divergence_idx + 1 :] ,
                    start_edges[divergence_idx + 1 :] ,
                    [True for _ in end_edges[divergence_idx + 1 :]] ,
                ) )

        def lower():
            assert start_edges is not None
            assert end_edges is not None
            edi = end[divergence_idx]
            return tuple(
                path + (slice(edi , edi + 1 ),) + s
                for s in _solve(
                    [0 for _ in start[divergence_idx + 1 :]] ,
                    end[divergence_idx + 1 :] ,
                    dims[divergence_idx + 1 :] ,
                    [True for _ in start_edges[divergence_idx + 1 :]] ,
                    end_edges[divergence_idx + 1 :] ,
                ) )

        # If both start and end are at the edges of the subtree rooted at
        # divergence_idx, we can just select the whole subtree at once.
        if start_edges[divergence_idx] and end_edges[divergence_idx]:
            slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
        # If just start is at the edge, we can grab almost all of the subtree,
        # treating only the ragged bottom edge as an edge case.
        elif start_edges[divergence_idx]:
            slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
            slices.extend(lower() )
        # Analogous to the previous case, but the top is ragged this time.
        elif end_edges[divergence_idx]:
            slices.extend(upper() )
            slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
        # If both sides of the range are ragged, handle each separately; any
        # contiguous middle can be indexed in one big chunk.
        else:
            slices.extend(upper() )
            middle_ground = end[divergence_idx] - start[divergence_idx]
            if middle_ground > 1:
                slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
            slices.extend(lower() )
        return slices

    return _solve(start , end , dims , start_edges , end_edges )
@torch.jit.ignore
def _A ( t , flat_start , flat_end , no_batch_dims ):
    """Equivalent of ``t.reshape(-1, *t.shape[no_batch_dims:])[flat_start:flat_end]``
    computed without materializing the flattened tensor.

    NOTE(review): the mangled original duplicated all four parameter names
    (a SyntaxError); restored.  ``_flat_idx_to_idx`` and
    ``_get_minimal_slice_set`` must be provided by the surrounding module
    (their module-level names were mangled away).
    """
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start , batch_dims ) )
    # _get_minimal_slice_set is inclusive, hence flat_end - 1.
    end_idx = list(_flat_idx_to_idx(flat_end - 1 , batch_dims ) )
    # Get an ordered list of slices to perform.
    slices = _get_minimal_slice_set(start_idx , end_idx , batch_dims )
    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def _A ( layer , inputs , chunk_size , no_batch_dims , low_mem = False , _out = None , _add_into_out = False , ):
    """Apply ``layer`` over ``inputs`` in chunks along the flattened leading
    batch dimensions and reassemble the results.

    NOTE(review): the mangled original duplicated its positional parameter
    names (a SyntaxError) and clobbered most locals; names are restored from
    the surviving body reads.  ``tensor_tree_map`` comes from the module's
    own import; ``_fetch_dims``/``_chunk_slice`` must be provided by the
    surrounding module.
    """
    if not (len(inputs ) > 0):
        raise ValueError("Must provide at least one input" )
    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs )]
    orig_batch_dims = tuple([max(s ) for s in zip(*initial_dims )] )

    def _prep_inputs(t ):
        # Broadcast every input up to the common batch shape, then (unless in
        # low-memory mode) flatten the batch dimensions into one.
        if not low_mem:
            if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
            t = t.reshape(-1 , *t.shape[no_batch_dims:] )
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs , inputs )
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    # Ceiling division: number of chunks needed to cover the flat batch.
    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t ):
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks ):
        # Chunk the input.
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice , flat_start=i , flat_end=min(flat_batch_dim , i + chunk_size ) , no_batch_dims=len(orig_batch_dims ) , )
        chunks = tensor_tree_map(select_chunk , prepped_inputs )
        # Run the layer on the chunk.
        output_chunk = layer(**chunks )
        # Allocate space for the output on first use.
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , output_chunk )
        # Put the chunk in its pre-allocated space.
        if isinstance(output_chunk , dict ):

            def assign(d1 , d2 ) -> None:
                for k, v in d1.items():
                    if isinstance(v , dict ):
                        assign(v , d2[k] )
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out , output_chunk )
        elif isinstance(output_chunk , tuple ):
            for xa, xb in zip(out , output_chunk ):
                if _add_into_out:
                    xa[i : i + chunk_size] += xb
                else:
                    xa[i : i + chunk_size] = xb
        elif isinstance(output_chunk , torch.Tensor ):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported" )
        i += chunk_size
    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:] ) , out )
    return out
class __lowerCAmelCase :
    """Dynamically picks the largest chunk size (for ``chunk_layer``-style
    callables) that runs without an OOM RuntimeError, caching the result per
    argument-shape signature.

    NOTE(review): the mangled original gave all three methods one shared name
    and duplicated their parameter names (a SyntaxError); the method names
    below are the ones the surviving ``self.`` call sites in the body require.
    """

    def __init__( self , max_chunk_size = 512 , ) -> None:
        # Upper bound on the chunk sizes that will ever be attempted.
        self.max_chunk_size = max_chunk_size
        # Cached result of the last successful tuning run.
        self.cached_chunk_size = None
        # Shape summary of the args the cached chunk size was tuned for.
        self.cached_arg_data = None

    def _determine_favorable_chunk_size( self , fn , args , min_chunk_size ) -> int:
        """Binary-search the largest candidate chunk size for which
        ``fn(*args, chunk_size=...)`` completes without RuntimeError."""
        logging.info("Tuning chunk size..." )
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        candidates = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size ) -> bool:
            try:
                with torch.no_grad():
                    fn(*args , chunk_size=chunk_size )
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates ) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i] )
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates ) - 1) // 2
        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches( self , ac_a , ac_b ) -> bool:
        """Recursively check that two argument-shape summaries match."""
        consistent = True
        for aa, ab in zip(ac_a , ac_b ):
            assert type(aa ) == type(ab )
            if isinstance(aa , (list, tuple) ):
                consistent &= self._compare_arg_caches(aa , ab )
            elif isinstance(aa , dict ):
                # Compare dict values in a deterministic key order.
                aa_items = [v for _, v in sorted(aa.items() , key=lambda x: x[0] )]
                ab_items = [v for _, v in sorted(ab.items() , key=lambda x: x[0] )]
                consistent &= self._compare_arg_caches(aa_items , ab_items )
            else:
                consistent &= aa == ab
        return consistent

    def tune_chunk_size( self , representative_fn , args , min_chunk_size , ) -> int:
        """Return a chunk size for ``args``, re-tuning only when the argument
        shapes changed since the previous call."""
        consistent = True
        # Summarize args by tensor shapes so the cache key is cheap to compare.
        arg_data = tree_map(lambda a: a.shape if isinstance(a , torch.Tensor ) else a , args , object )
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune.
            assert len(self.cached_arg_data ) == len(arg_data )
            consistent = self._compare_arg_caches(self.cached_arg_data , arg_data )
        else:
            # Nothing cached yet: force a tuning run.
            consistent = False
        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn , args , min_chunk_size , )
            self.cached_arg_data = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
| 629 | 1 |
import re
import string
import numpy as np
import datasets
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
SCREAMING_SNAKE_CASE__ : Dict = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
SCREAMING_SNAKE_CASE__ : str = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
    """Exact-match metric: percentage of predictions equal to their reference.

    NOTE(review): the mangled original named both methods ``_snake_case``
    (the second also duplicated its parameter names, a SyntaxError); they are
    restored to the ``_info``/``_compute`` hooks that ``datasets.Metric``
    dispatches to, with parameter names taken from the surviving body reads.
    """

    def _info( self ) -> datasets.MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("string" , id="sequence" ),
                    "references": datasets.Value("string" , id="sequence" ),
                } ) , reference_urls=[] , )

    def _compute( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ) -> dict:
        """Return {"exact_match": rate} in [0.0, 100.0] after normalization."""
        # Strip user-supplied regex matches before any other normalization.
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s , "" , x ) for x in predictions] )
                references = np.array([re.sub(s , "" , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )
        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("" , "" , string.punctuation )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        if ignore_numbers:
            repl_table = string.digits.maketrans("" , "" , string.digits )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 100}
| 629 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
# Module-level logger for this configuration module.
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
class __lowerCAmelCase ( PretrainedConfig ):
    """Configuration class for UPerNet semantic-segmentation models.

    NOTE(review): the mangled original declared every ``__init__`` parameter
    as the duplicate name ``snake_case`` (a SyntaxError) and mangled the
    ``model_type`` attribute name.  Parameter names are restored from the
    attribute assignments in the original body; the base class comes from the
    file's own import.
    """

    model_type = "upernet"

    def __init__(
        self ,
        backbone_config=None ,
        hidden_size=512 ,
        initializer_range=0.02 ,
        pool_scales=[1, 2, 3, 6] ,  # treated as read-only; never mutated
        use_auxiliary_head=True ,
        auxiliary_loss_weight=0.4 ,
        auxiliary_in_channels=384 ,
        auxiliary_channels=256 ,
        auxiliary_num_convs=1 ,
        auxiliary_concat_input=False ,
        loss_ignore_index=255 ,
        **kwargs ,
    ) -> None:
        super().__init__(**kwargs )
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
        elif isinstance(backbone_config , dict ):
            # A plain dict is converted into the matching backbone config class.
            backbone_model_type = backbone_config.get("model_type" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict( self ) -> dict:
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 629 | 1 |
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class __lowerCAmelCase ( KwargsHandler ):
    """Minimal KwargsHandler used by the tests below.

    NOTE(review): the mangled original declared all three dataclass fields
    under one duplicated name; the sibling tests construct this class with
    ``a=``/``b=``/``c=`` keywords, so the fields are restored to those names.
    The base class comes from the file's own import.
    """

    a : int = 0
    b : bool = False
    c : float = 3.0
class __lowerCAmelCase ( unittest.TestCase ):
    """Tests for KwargsHandler.to_kwargs and the GradScaler/DDP kwargs handlers.

    NOTE(review): the body references the undefined names ``MockClass`` and
    ``snake_case`` — identifier mangling destroyed them; left byte-identical.
    """

    def _snake_case ( self ) -> Optional[int]:
        """to_kwargs() should only contain fields that differ from defaults."""
        self.assertDictEqual(MockClass().to_kwargs() , {} )
        self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"a": 2} )
        self.assertDictEqual(MockClass(a=2 , b=snake_case ).to_kwargs() , {"a": 2, "b": True} )
        self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"a": 2, "c": 2.25} )
    @require_cuda
    def _snake_case ( self ) -> Optional[Any]:
        """GradScalerKwargs should be forwarded to the fp16 GradScaler."""
        a__ : List[str] = GradScalerKwargs(init_scale=1_024 , growth_factor=2 )
        AcceleratorState._reset_state()
        a__ : List[Any] = Accelerator(mixed_precision="fp16" , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fpaa )
        a__ : int = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale , 1_024.0 )
        self.assertEqual(scaler._growth_factor , 2.0 )
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor , 0.5 )
        self.assertEqual(scaler._growth_interval , 2_000 )
        self.assertEqual(scaler._enabled , snake_case )
    @require_multi_gpu
    def _snake_case ( self ) -> Tuple:
        """Re-run this file under torchrun to exercise the DDP kwargs path."""
        a__ : List[Any] = ["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
        execute_subprocess_async(snake_case , env=os.environ.copy() )
if __name__ == "__main__":
    # Executed under torchrun by the multi-GPU test above: prepare a DDP model
    # with custom DistributedDataParallelKwargs and verify which values stuck.
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = DistributedDataParallelKwargs(bucket_cap_mb=1_5, find_unused_parameters=True)
    SCREAMING_SNAKE_CASE__ : int = Accelerator(kwargs_handlers=[ddp_scaler])
    SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.nn.Linear(1_0_0, 2_0_0)
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = accelerator.prepare(model)
    # Check the values changed in kwargs
    SCREAMING_SNAKE_CASE__ : Dict = """"""
    SCREAMING_SNAKE_CASE__ : Optional[Any] = model.bucket_bytes_cap // (1_0_2_4 * 1_0_2_4)
    if observed_bucket_cap_map != 1_5:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 629 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
# Map human-readable resampling names to PIL resampling filters.
# Pillow 9.1.0 moved the filters to the `PIL.Image.Resampling` enum and
# deprecated the old module-level constants, hence the version switch.
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
    SCREAMING_SNAKE_CASE__ : int = {
        """linear""": PIL.Image.Resampling.BILINEAR,
        """bilinear""": PIL.Image.Resampling.BILINEAR,
        """bicubic""": PIL.Image.Resampling.BICUBIC,
        """lanczos""": PIL.Image.Resampling.LANCZOS,
        """nearest""": PIL.Image.Resampling.NEAREST,
    }
else:
    # Older Pillow: fall back to the legacy module-level constants.
    SCREAMING_SNAKE_CASE__ : Dict = {
        """linear""": PIL.Image.LINEAR,
        """bilinear""": PIL.Image.BILINEAR,
        """bicubic""": PIL.Image.BICUBIC,
        """lanczos""": PIL.Image.LANCZOS,
        """nearest""": PIL.Image.NEAREST,
    }
def _A ( lowerCamelCase ):
    """Convert a batch of torch images with values in [-1, 1] to PIL images.

    Fix: every intermediate was previously stored in a throwaway local, so the
    name actually read (``images``) was undefined.
    """
    images = (lowerCamelCase / 2 + 0.5).clamp(0 , 1 )
    # (N, C, H, W) -> (N, H, W, C) numpy array as expected by PIL conversion.
    images = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    # NOTE(review): `numpy_to_pil` is not defined in this module (the helper
    # below was also renamed to `_A`) -- confirm the intended target.
    images = numpy_to_pil(images )
    return images
def _A ( images ):
    """Convert a numpy image batch with values in [0, 1] to a list of PIL images.

    Accepts a single (H, W, C) image or an (N, H, W, C) batch; single-channel
    images are emitted in mode "L".

    Fix: the parameter and intermediates were previously bound to throwaway
    names, so the names read in the body (``images``, ``pil_images``) were
    undefined.
    """
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8" )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze() , mode="L" ) for image in images]
    else:
        pil_images = [Image.fromarray(image ) for image in images]
    return pil_images
| 629 | 1 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class __lowerCAmelCase :
    """Arguments controlling CodeParrot model training (paths, batch sizes, schedule)."""

    # NOTE(review): every field below is annotated under the same name
    # `_UpperCamelCase`, so later annotations overwrite earlier ones and only
    # the last field survives on the dataclass -- the original field names
    # appear to have been lost in an automated rename; confirm upstream.
    _UpperCamelCase : Optional[str] = field(
        default="""codeparrot/codeparrot""" ,metadata={"""help""": """Model name or path of model to be trained."""} )
    _UpperCamelCase : Optional[str] = field(
        default="""./""" ,metadata={"""help""": """Save dir where model repo is cloned and models updates are saved to."""} )
    _UpperCamelCase : Optional[str] = field(
        default="""codeparrot/codeparrot-clean-train""" ,metadata={"""help""": """Name or path of training dataset."""} )
    _UpperCamelCase : Optional[str] = field(
        default="""codeparrot/codeparrot-clean-valid""" ,metadata={"""help""": """Name or path of validation dataset."""} )
    _UpperCamelCase : Optional[int] = field(default=2 ,metadata={"""help""": """Batch size for training."""} )
    _UpperCamelCase : Optional[int] = field(default=2 ,metadata={"""help""": """Batch size for evaluation."""} )
    _UpperCamelCase : Optional[float] = field(default=0.1 ,metadata={"""help""": """Value of weight decay."""} )
    _UpperCamelCase : Optional[int] = field(
        default=10000 ,metadata={"""help""": """Size of buffer used to shuffle streaming dataset."""} )
    _UpperCamelCase : Optional[float] = field(default=2E-4 ,metadata={"""help""": """Learning rate fo training."""} )
    _UpperCamelCase : Optional[str] = field(default="""cosine""" ,metadata={"""help""": """Learning rate."""} )
    _UpperCamelCase : Optional[int] = field(
        default=750 ,metadata={"""help""": """Number of warmup steps in the learning rate schedule."""} )
    _UpperCamelCase : Optional[int] = field(
        default=16 ,metadata={"""help""": """Number of gradient accumulation steps."""} )
    _UpperCamelCase : Optional[bool] = field(
        default=_UpperCamelCase ,metadata={"""help""": """Use gradient checkpointing to reduce memory footprint."""} )
    _UpperCamelCase : Optional[int] = field(default=50000 ,metadata={"""help""": """Maximum number of training steps."""} )
    _UpperCamelCase : Optional[int] = field(
        default=-1 ,metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
    _UpperCamelCase : Optional[int] = field(default=1024 ,metadata={"""help""": """Sequence lengths used for training."""} )
    _UpperCamelCase : Optional[int] = field(default=1 ,metadata={"""help""": """Training seed."""} )
    _UpperCamelCase : Optional[int] = field(
        default=1024 ,metadata={"""help""": """Interval to save checkpoints. Measured as number of forward passes not training steps."""} ,)
    _UpperCamelCase : Optional[str] = field(
        default=_UpperCamelCase ,metadata={"""help""": """States path if the training should continue from a checkpoint folder."""} )
    _UpperCamelCase : Optional[bool] = field(default=_UpperCamelCase ,metadata={"""help""": """If True the data is pretokenized."""} )
@dataclass
class __lowerCAmelCase :
    """Arguments for perplexity evaluation of a trained model."""

    # NOTE(review): duplicated field name `_UpperCamelCase` throughout -- only
    # the last field survives; original names appear lost in a rename.
    _UpperCamelCase : Optional[str] = field(
        default="""codeparrot/codeparrot""" ,metadata={"""help""": """Model name or path of model to be evaluated."""} )
    _UpperCamelCase : Optional[str] = field(
        default="""codeparrot/codeparrot-clean-valid""" ,metadata={"""help""": """Name or path of validation dataset."""} )
    _UpperCamelCase : Optional[int] = field(default=2 ,metadata={"""help""": """Batch size used for evaluation."""} )
    _UpperCamelCase : Optional[int] = field(
        default=-1 ,metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
    _UpperCamelCase : Optional[int] = field(default=1024 ,metadata={"""help""": """Length of sequences to be evaluated."""} )
    _UpperCamelCase : Optional[int] = field(default=1 ,metadata={"""help""": """Random seed used for evaluation."""} )
@dataclass
class __lowerCAmelCase :
    """Arguments for HumanEval code-generation benchmarking (sampling and execution)."""

    # NOTE(review): duplicated field name `_UpperCamelCase` throughout -- only
    # the last field survives; original names appear lost in a rename.
    _UpperCamelCase : Optional[str] = field(
        default="""codeparrot/codeparrot""" ,metadata={"""help""": """Model name or path of model to be evaluated."""} )
    _UpperCamelCase : Optional[int] = field(default=_UpperCamelCase ,metadata={"""help""": """Number of workers used for code evaluation."""} )
    _UpperCamelCase : Optional[int] = field(
        default=_UpperCamelCase ,metadata={"""help""": """The number of human-eval tasks to run. If not included all tasks are evaluated."""} ,)
    _UpperCamelCase : Optional[bool] = field(
        default=_UpperCamelCase ,metadata={"""help""": """Sample from the language model's output distribution."""} )
    _UpperCamelCase : Optional[float] = field(default=0.2 ,metadata={"""help""": """Sampling temperature used for generation."""} )
    _UpperCamelCase : Optional[int] = field(default=256 ,metadata={"""help""": """Maximum number of newly generated tokens."""} )
    _UpperCamelCase : Optional[int] = field(default=0 ,metadata={"""help""": """Top-k parameter used for generation."""} )
    _UpperCamelCase : Optional[float] = field(default=0.95 ,metadata={"""help""": """Top-p parameter used for nucleus sampling."""} )
    _UpperCamelCase : Optional[int] = field(default=10 ,metadata={"""help""": """Number of generations to run in parallel."""} )
    _UpperCamelCase : Optional[int] = field(
        default=200 ,metadata={"""help""": """Number of completions to generate for each sample."""} )
    _UpperCamelCase : Optional[int] = field(default=1 ,metadata={"""help""": """Random seed used for evaluation."""} )
    _UpperCamelCase : Optional[str] = field(
        default="""eval_results.json""" ,metadata={"""help""": """Random seed used for evaluation."""} )
    _UpperCamelCase : Optional[str] = field(
        default="""0""" ,metadata={"""help""": """Allow `code_eval` to execute Python code on machine"""} )
    _UpperCamelCase : Optional[int] = field(
        default=-1 ,metadata={
            """help""": (
                """Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"""
                """ number corresponds to which GPU device id to run on."""
            )
        } ,)
@dataclass
class __lowerCAmelCase :
    """Arguments for dataset cleaning/filtering before training."""

    # NOTE(review): duplicated field name `_UpperCamelCase` throughout -- only
    # the last field survives; original names appear lost in a rename.
    _UpperCamelCase : Optional[int] = field(
        default=_UpperCamelCase ,metadata={
            """help""": """The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."""
        } ,)
    _UpperCamelCase : Optional[str] = field(
        default="""transformersbook/codeparrot""" ,metadata={"""help""": """Folder or name of dataset to process."""} )
    _UpperCamelCase : Optional[str] = field(
        default="""codeparrot-clean""" ,metadata={"""help""": """Folder to save processed processed dataset."""} )
    _UpperCamelCase : Optional[int] = field(
        default=100000 ,metadata={"""help""": """Number of files to save per JSON output file."""} )
    _UpperCamelCase : Optional[str] = field(default="""content""" ,metadata={"""help""": """Column containing text data to process."""} )
    _UpperCamelCase : Optional[float] = field(
        default=1000 ,metadata={"""help""": """Maximum line length in file, otherwise file is filtered."""} )
    _UpperCamelCase : Optional[float] = field(
        default=100 ,metadata={"""help""": """Maximum mean line length in file, otherwise file is filtered."""} )
    _UpperCamelCase : Optional[float] = field(
        default=0.25 ,metadata={"""help""": """Maximum fraction of non-alphanumeric characters, otherwise file is filtered."""} )
    _UpperCamelCase : Optional[float] = field(
        default=1.5 ,metadata={"""help""": """Minimum character token ratio for the file, otherwise file is filtered."""} )
    _UpperCamelCase : Optional[float] = field(
        default=0.7 ,metadata={"""help""": """Probability for filtering config, test and uncommon files."""} )
    _UpperCamelCase : Optional[str] = field(
        default="""codeparrot/codeparrot""" ,metadata={"""help""": """Name or path to the tokenizer."""} ,)
    _UpperCamelCase : Optional[bool] = field(
        default=_UpperCamelCase ,metadata={"""help""": """If True, near-duplicate samples are removed."""} )
    _UpperCamelCase : Optional[float] = field(
        default=0.85 ,metadata={"""help""": """Jaccard threshold for near-duplicate samples."""} )
@dataclass
class __lowerCAmelCase :
    """Arguments for training a new tokenizer from an existing one."""

    # NOTE(review): duplicated field name `_UpperCamelCase` throughout -- only
    # the last field survives; original names appear lost in a rename.
    _UpperCamelCase : Optional[str] = field(
        default="""gpt2""" ,metadata={"""help""": """Base tokenizer to build new tokenizer from."""} )
    _UpperCamelCase : Optional[str] = field(
        default="""transformersbook/codeparrot-train""" ,metadata={"""help""": """Dataset to train tokenizer on."""} )
    _UpperCamelCase : Optional[str] = field(default="""content""" ,metadata={"""help""": """Column containing text data to process."""} )
    _UpperCamelCase : Optional[int] = field(default=200000 ,metadata={"""help""": """Number of examples to train tokenizer on."""} )
    _UpperCamelCase : Optional[int] = field(
        default=32768 ,metadata={"""help""": """Number of examples to train the tokenizer on."""} )
    _UpperCamelCase : Optional[str] = field(default="""codeparrot""" ,metadata={"""help""": """Name of new tokenizer."""} )
    _UpperCamelCase : Optional[bool] = field(default=_UpperCamelCase ,metadata={"""help""": """Push saved tokenizer to the hub."""} )
@dataclass
class __lowerCAmelCase :
    """Arguments for pretokenizing a dataset and pushing the result to the Hub."""

    # NOTE(review): duplicated field name `_UpperCamelCase` throughout -- only
    # the last field survives; original names appear lost in a rename.
    _UpperCamelCase : Optional[str] = field(
        default="""codeparrot/codeparrot""" ,metadata={"""help""": """Name or path to the tokenizer."""} )
    _UpperCamelCase : Optional[str] = field(
        default="""codeparrot/codeparrot-clean-train""" ,metadata={"""help""": """Name or path to the dataset to pretokenize."""} )
    _UpperCamelCase : Optional[str] = field(
        default="""tokenized-codeparrot-train""" ,metadata={"""help""": """Repo name of the pretokenized data."""} )
    _UpperCamelCase : Optional[int] = field(default=_UpperCamelCase ,metadata={"""help""": """Number of workers used for code evaluation."""} )
@dataclass
class __lowerCAmelCase :
    """Arguments for initializing a fresh model from a configuration."""

    # NOTE(review): duplicated field name `_UpperCamelCase` throughout -- only
    # the last field survives; original names appear lost in a rename.
    _UpperCamelCase : Optional[str] = field(
        default="""gpt2-large""" ,metadata={"""help""": """Configuration to use for model initialization."""} )
    _UpperCamelCase : Optional[str] = field(
        default="""codeparrot/codeparrot""" ,metadata={"""help""": """Tokenizer attached to model."""} )
    _UpperCamelCase : Optional[str] = field(default="""codeparrot""" ,metadata={"""help""": """Name of the created model."""} )
    _UpperCamelCase : Optional[bool] = field(default=_UpperCamelCase ,metadata={"""help""": """Push saved tokenizer to the hub."""} )
| 629 |
# Lint as: python3
import itertools
import os
import re
SCREAMING_SNAKE_CASE__ : Tuple = re.compile(R"""([A-Z]+)([A-Z][a-z])""")
SCREAMING_SNAKE_CASE__ : List[str] = re.compile(R"""([a-z\d])([A-Z])""")
SCREAMING_SNAKE_CASE__ : Tuple = re.compile(R"""(?<!_)_(?!_)""")
SCREAMING_SNAKE_CASE__ : Dict = re.compile(R"""(_{2,})""")
SCREAMING_SNAKE_CASE__ : List[Any] = R"""^\w+(\.\w+)*$"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = R"""<>:/\|?*"""
def _A ( lowerCamelCase ):
    """Convert a CamelCase name to snake_case.

    Fix: the intermediate results were previously bound to throwaway locals,
    so the returned name was undefined.
    """
    name = _uppercase_uppercase_re.sub(r"\1_\2" , lowerCamelCase )
    name = _lowercase_uppercase_re.sub(r"\1_\2" , name )
    return name.lower()
def _A ( lowerCamelCase ):
    """Convert a snake_case name to CamelCase, capitalizing each chunk.

    Fix: intermediates were bound to throwaway locals (leaving ``name``
    undefined) and the inner split was applied to the whole argument instead
    of each chunk ``n``.
    """
    name = _single_underscore_re.split(lowerCamelCase )
    name = [_multiple_underscores_re.split(n ) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name ) if n != "" )
def _A ( lowerCamelCase ):
    """Return the snake_case file prefix for a dataset name (must not be a path).

    Fix: the comparison and error message read ``name``, which was undefined;
    the parameter is used consistently instead.
    """
    if os.path.basename(lowerCamelCase ) != lowerCamelCase:
        raise ValueError(F"""Should be a dataset name, not a path: {lowerCamelCase}""" )
    # NOTE(review): `camelcase_to_snakecase` is not defined in this chunk (the
    # helper above was renamed to `_A`) -- confirm the intended target.
    return camelcase_to_snakecase(lowerCamelCase )
def _A ( name , split ):
    """Return ``<snake_case_name>-<split>``, validating both arguments.

    Fix: the previous signature declared two parameters with the same name
    (a SyntaxError); the body references ``name`` and ``split``.
    """
    if os.path.basename(name ) != name:
        raise ValueError(F"""Should be a dataset name, not a path: {name}""" )
    if not re.match(_split_re , split ):
        raise ValueError(F"""Split name should match '{_split_re}'' but got '{split}'.""" )
    return F"""{filename_prefix_for_name(name )}-{split}"""
def _A ( dataset_name , split , data_dir , filetype_suffix=None ):
    """Return a glob pattern matching all shard files of a dataset split.

    Fix: the previous signature declared several parameters under one name
    (a SyntaxError) and the intermediates (``prefix``, ``filepath``) were
    bound to throwaway locals.
    """
    prefix = filename_prefix_for_split(dataset_name , split )
    if filetype_suffix:
        prefix += F""".{filetype_suffix}"""
    filepath = os.path.join(data_dir , prefix )
    return F"""{filepath}*"""
def _A ( path , dataset_name , split , filetype_suffix=None , shard_lengths=None ):
    """Return the concrete shard filenames for a dataset split.

    With ``shard_lengths`` given, emits one ``prefix-XXXXX-of-NNNNN`` name per
    shard; otherwise a single-element list with the bare prefix.

    Fix: the previous signature declared several parameters under one name
    (a SyntaxError) and every intermediate was bound to a throwaway local.
    """
    prefix = filename_prefix_for_split(dataset_name , split )
    prefix = os.path.join(path , prefix )
    if shard_lengths:
        num_shards = len(shard_lengths )
        filenames = [F"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(num_shards )]
        if filetype_suffix:
            filenames = [filename + F""".{filetype_suffix}""" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += F""".{filetype_suffix}"""
        return [filename]
| 629 | 1 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
# Library version string.
SCREAMING_SNAKE_CASE__ : Any = """2.13.1"""
import platform
import pyarrow
from packaging import version
# Fail fast when the interpreter or pyarrow is too old.
# NOTE(review): raising ImportWarning (a Warning subclass) as an exception is
# unusual -- confirm this is intended rather than `warnings.warn`.
if version.parse(platform.python_version()) < version.parse("""3.7"""):
    raise ImportWarning(
        """To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
    )
if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        """To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
        """If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
    )
# Keep the module namespace clean: these names were only needed for the checks.
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
# Backward-compatibility aliases re-exported at module level.
# NOTE(review): all aliases below are bound to the same throwaway name, so
# each assignment overwrites the previous one -- the original alias names
# appear lost in an automated rename; confirm upstream.
SCREAMING_SNAKE_CASE__ : List[str] = concatenate_datasets
SCREAMING_SNAKE_CASE__ : Optional[Any] = DownloadConfig
SCREAMING_SNAKE_CASE__ : Union[str, Any] = DownloadManager
SCREAMING_SNAKE_CASE__ : Optional[int] = DownloadMode
SCREAMING_SNAKE_CASE__ : List[str] = DownloadConfig
SCREAMING_SNAKE_CASE__ : Union[str, Any] = DownloadMode
SCREAMING_SNAKE_CASE__ : int = DownloadManager
# Drop the deprecated module handles imported above from the namespace.
del _arrow_dataset, _utils, _deprecated_download_manager
| 629 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy-import structure: maps submodule name -> public names it provides.
# Fix: this dict and the modeling list were previously bound to throwaway
# names, leaving `_import_structure` (used at the bottom) undefined and the
# torch-only entries discarded.
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: also expose the modeling objects.
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 629 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
# Fix: bind the logger under the name `logger`, which is what the conversion
# function below actually uses; it was previously bound to a throwaway name.
logger = logging.get_logger(__name__)
def _A ( lowerCamelCase , lowerCamelCase=False ):
a__ : Tuple = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith("head" ):
a__ : Optional[Any] = "segformer.encoder." + key
if key.startswith("backbone" ):
a__ : int = key.replace("backbone" , "segformer.encoder" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
a__ : Optional[int] = key[key.find("patch_embed" ) + len("patch_embed" )]
a__ : Union[str, Any] = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(lowerCamelCase )-1}""" )
if "norm" in key:
a__ : Tuple = key.replace("norm" , "layer_norm" )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
a__ : List[str] = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )]
a__ : str = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(lowerCamelCase )-1}""" )
if "layer_norm1" in key:
a__ : Any = key.replace("layer_norm1" , "layer_norm_1" )
if "layer_norm2" in key:
a__ : Union[str, Any] = key.replace("layer_norm2" , "layer_norm_2" )
if "block" in key:
# replace for example block1 by block.0
a__ : Tuple = key[key.find("block" ) + len("block" )]
a__ : Tuple = key.replace(F"""block{idx}""" , F"""block.{int(lowerCamelCase )-1}""" )
if "attn.q" in key:
a__ : Optional[Any] = key.replace("attn.q" , "attention.self.query" )
if "attn.proj" in key:
a__ : Optional[int] = key.replace("attn.proj" , "attention.output.dense" )
if "attn" in key:
a__ : List[str] = key.replace("attn" , "attention.self" )
if "fc1" in key:
a__ : Dict = key.replace("fc1" , "dense1" )
if "fc2" in key:
a__ : Dict = key.replace("fc2" , "dense2" )
if "linear_pred" in key:
a__ : str = key.replace("linear_pred" , "classifier" )
if "linear_fuse" in key:
a__ : List[Any] = key.replace("linear_fuse.conv" , "linear_fuse" )
a__ : Optional[int] = key.replace("linear_fuse.bn" , "batch_norm" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
a__ : List[str] = key[key.find("linear_c" ) + len("linear_c" )]
a__ : Optional[Any] = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(lowerCamelCase )-1}""" )
if key.startswith("head" ):
a__ : int = key.replace("head" , "classifier" )
a__ : List[Any] = value
return new_state_dict
def _A ( lowerCamelCase , lowerCamelCase ):
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
a__ : Dict = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" )
a__ : Any = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
a__ : Optional[int] = kv_weight[
: config.hidden_sizes[i], :
]
a__ : List[str] = kv_bias[: config.hidden_sizes[i]]
a__ : Optional[int] = kv_weight[
config.hidden_sizes[i] :, :
]
a__ : Dict = kv_bias[
config.hidden_sizes[i] :
]
def _A ( ):
    """Download and return the standard COCO sample image used for sanity checks.

    Fix: the body read the undefined name ``lowerCamelCase`` for both the URL
    and the ``stream`` flag, and returned an unbound name.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
    """Convert an original SegFormer/MiT checkpoint to the HF format and save it.

    NOTE(review): this body is too mangled to repair confidently in place --
    the signature declares three parameters under a single name (a
    SyntaxError), and almost every assignment target (``config`` attributes,
    ``encoder_only``, ``size``, ``state_dict``, ``expected_slice``, ...) was
    replaced by the throwaway local ``a__``, so most names read below are
    undefined. Restore this function from the upstream conversion script
    rather than patching line by line.
    """
    a__ : str = SegformerConfig()
    a__ : str = False
    # set attributes based on model_name
    a__ : Dict = "huggingface/label-files"
    if "segformer" in model_name:
        a__ : Any = model_name[len("segformer." ) : len("segformer." ) + 2]
        if "ade" in model_name:
            a__ : Optional[int] = 150
            a__ : Any = "ade20k-id2label.json"
            a__ : int = (1, 150, 128, 128)
        elif "city" in model_name:
            a__ : List[Any] = 19
            a__ : int = "cityscapes-id2label.json"
            a__ : List[Any] = (1, 19, 128, 128)
        else:
            raise ValueError(F"""Model {model_name} not supported""" )
    elif "mit" in model_name:
        a__ : str = True
        a__ : Union[str, Any] = model_name[4:6]
        a__ : Optional[Any] = 1000
        a__ : List[str] = "imagenet-1k-id2label.json"
        a__ : Tuple = (1, 1000)
    else:
        raise ValueError(F"""Model {model_name} not supported""" )
    # set config attributes
    a__ : Tuple = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="dataset" ) , "r" ) )
    a__ : Tuple = {int(lowerCamelCase ): v for k, v in idalabel.items()}
    a__ : int = idalabel
    a__ : List[str] = {v: k for k, v in idalabel.items()}
    if size == "b0":
        pass
    elif size == "b1":
        a__ : List[Any] = [64, 128, 320, 512]
        a__ : int = 256
    elif size == "b2":
        a__ : List[Any] = [64, 128, 320, 512]
        a__ : Optional[int] = 768
        a__ : Optional[Any] = [3, 4, 6, 3]
    elif size == "b3":
        a__ : str = [64, 128, 320, 512]
        a__ : List[str] = 768
        a__ : Dict = [3, 4, 18, 3]
    elif size == "b4":
        a__ : Tuple = [64, 128, 320, 512]
        a__ : Optional[int] = 768
        a__ : str = [3, 8, 27, 3]
    elif size == "b5":
        a__ : str = [64, 128, 320, 512]
        a__ : Union[str, Any] = 768
        a__ : List[str] = [3, 6, 40, 3]
    else:
        raise ValueError(F"""Size {size} not supported""" )
    # load image processor (only resize + normalize)
    a__ : int = SegformerImageProcessor(
        image_scale=(512, 512) , keep_ratio=lowerCamelCase , align=lowerCamelCase , do_random_crop=lowerCamelCase )
    # prepare image
    a__ : Union[str, Any] = prepare_img()
    a__ : Any = image_processor(images=lowerCamelCase , return_tensors="pt" ).pixel_values
    logger.info(F"""Converting model {model_name}...""" )
    # load original state dict
    if encoder_only:
        a__ : Union[str, Any] = torch.load(lowerCamelCase , map_location=torch.device("cpu" ) )
    else:
        a__ : Optional[Any] = torch.load(lowerCamelCase , map_location=torch.device("cpu" ) )["state_dict"]
    # rename keys
    a__ : Union[str, Any] = rename_keys(lowerCamelCase , encoder_only=lowerCamelCase )
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]
    # key and value matrices need special treatment
    read_in_k_v(lowerCamelCase , lowerCamelCase )
    # create HuggingFace model and load state dict
    if encoder_only:
        a__ : Optional[int] = False
        a__ : Any = SegformerForImageClassification(lowerCamelCase )
    else:
        a__ : List[Any] = SegformerForSemanticSegmentation(lowerCamelCase )
    model.load_state_dict(lowerCamelCase )
    model.eval()
    # forward pass
    a__ : Dict = model(lowerCamelCase )
    a__ : Tuple = outputs.logits
    # set expected_slice based on model name
    # ADE20k checkpoints
    if model_name == "segformer.b0.512x512.ade.160k":
        a__ : List[str] = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ] )
    elif model_name == "segformer.b1.512x512.ade.160k":
        a__ : str = torch.tensor(
            [
                [[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
                [[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
                [[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
            ] )
    elif model_name == "segformer.b2.512x512.ade.160k":
        a__ : Any = torch.tensor(
            [
                [[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
                [[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
                [[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
            ] )
    elif model_name == "segformer.b3.512x512.ade.160k":
        a__ : int = torch.tensor(
            [
                [[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
                [[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
                [[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
            ] )
    elif model_name == "segformer.b4.512x512.ade.160k":
        a__ : Optional[int] = torch.tensor(
            [
                [[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
                [[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
                [[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
            ] )
    elif model_name == "segformer.b5.640x640.ade.160k":
        a__ : Any = torch.tensor(
            [
                [[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
                [[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
                [[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
            ] )
    # Cityscapes checkpoints
    elif model_name == "segformer.b0.1024x1024.city.160k":
        a__ : Any = torch.tensor(
            [
                [[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
                [[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
                [[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
            ] )
    elif model_name == "segformer.b0.512x1024.city.160k":
        a__ : Optional[int] = torch.tensor(
            [
                [[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
                [[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
                [[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
            ] )
    elif model_name == "segformer.b0.640x1280.city.160k":
        a__ : Union[str, Any] = torch.tensor(
            [
                [
                    [-1.1_372E01, -1.2_787E01, -1.3_477E01],
                    [-1.2_536E01, -1.4_194E01, -1.4_409E01],
                    [-1.3_217E01, -1.4_888E01, -1.5_327E01],
                ],
                [
                    [-1.4_791E01, -1.7_122E01, -1.8_277E01],
                    [-1.7_163E01, -1.9_192E01, -1.9_533E01],
                    [-1.7_897E01, -1.9_991E01, -2.0_315E01],
                ],
                [
                    [7.6_723E-01, 4.1_921E-01, -7.7_878E-02],
                    [4.7_772E-01, 9.5_557E-03, -2.8_082E-01],
                    [3.6_032E-01, -2.4_826E-01, -5.1_168E-01],
                ],
            ] )
    elif model_name == "segformer.b0.768x768.city.160k":
        a__ : Any = torch.tensor(
            [
                [[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
                [[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
                [[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
            ] )
    elif model_name == "segformer.b1.1024x1024.city.160k":
        a__ : Optional[int] = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ] )
    elif model_name == "segformer.b2.1024x1024.city.160k":
        a__ : Union[str, Any] = torch.tensor(
            [
                [[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
                [[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
                [[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
            ] )
    elif model_name == "segformer.b3.1024x1024.city.160k":
        a__ : Optional[int] = torch.tensor(
            [
                [[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
                [[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
                [[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
            ] )
    elif model_name == "segformer.b4.1024x1024.city.160k":
        a__ : str = torch.tensor(
            [
                [[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
                [[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
                [[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
            ] )
    elif model_name == "segformer.b5.1024x1024.city.160k":
        a__ : List[Any] = torch.tensor(
            [
                [[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
                [[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
                [[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
            ] )
    else:
        a__ : int = logits.argmax(-1 ).item()
        print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase , atol=1E-2 )
    # finally, save model and image processor
    logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
    Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
    model.save_pretrained(lowerCamelCase )
    image_processor.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
    # Fix: `parser` and `args` were previously bound to throwaway names, so
    # `parser.add_argument(...)` and `args.<field>` referenced undefined names.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--model_name""",
        default="""segformer.b0.512x512.ade.160k""",
        type=str,
        help="""Name of the model you'd like to convert.""",
    )
    parser.add_argument(
        """--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
    )
    args = parser.parse_args()
    # NOTE(review): `convert_segformer_checkpoint` is not defined in this chunk
    # (the conversion function above was renamed to `_A`) -- confirm the target.
    convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 629 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Lazy-import structure: maps submodule name -> public names it provides.
# Fix: this dict and the modeling list were previously bound to throwaway
# names, leaving `_import_structure` (used at the bottom) undefined and the
# torch-only entries discarded.
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: also expose the modeling objects.
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 629 | 1 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    """Builds tiny MobileBERT configs/inputs and exercises every task head.

    Restores the canonical attribute/local names: the obfuscated version
    assigned everything to throwaway `a__` names while the methods read
    `self.batch_size`, `input_ids`, etc., and all methods shared the name
    `_snake_case` (shadowing each other), so the test class's calls to
    `create_and_check_mobilebert_*` could never resolve.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Build a tiny MobileBertConfig from the tester's hyperparameters."""
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            # NOTE(review): the obfuscated source passed an undefined name here;
            # encoder-only testing implies is_decoder=False — confirm against upstream.
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise the three supported call signatures; shapes checked on the last.
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        # NSP is a binary decision, hence logits of width 2.
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Repeat each sequence num_choices times along a new axis.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Runs the common model/pipeline test suites against tiny MobileBERT models.

    Restores the canonical names: the obfuscated version had undefined mixin
    bases, a `_prepare_for_class` with duplicate parameter names (a
    SyntaxError), unbound `inputs_dict`/`self.model_tester`, and every test
    method named `_snake_case` so only the last one survived.
    """

    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # Special case: the pretraining head also needs `next_sentence_label`.
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    """Build a long tensor on the test device from a (nested) list of token ids.

    Fixes two defects: the original passed its single argument as BOTH the
    tensor data and the `device` (which fails at runtime), and it was defined
    under the name `_A` while the integration test below calls `_long_tensor`.
    """
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


# Backwards-compatible alias for the previous (obfuscated) name.
_A = _long_tensor
# Relative tolerance for the integration test below; it is read as `TOLERANCE`
# there, so it must be bound to that name (the obfuscated assignment left
# `TOLERANCE` undefined).
TOLERANCE = 1e-3
SCREAMING_SNAKE_CASE__ = TOLERANCE  # legacy alias kept for compatibility
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    """Slow integration check against the released google/mobilebert-uncased weights."""

    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
| 629 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
# Fixture passage queried by the tests below; they read it as `TEXT`, so it
# must be bound to that name (the obfuscated assignment left it unreachable).
TEXT = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
SCREAMING_SNAKE_CASE__ = TEXT  # legacy alias kept for compatibility
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    """Exercises the text-question-answering tool locally and remotely.

    Restores the broken wiring: `setUp` must assign `self.tool`/`self.remote_tool`
    (the obfuscated version bound them to throwaway locals), the mixin base was
    an undefined name, and the tests must pass the module-level `TEXT` fixture.
    """

    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 629 | 1 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    """
    Output class for the scheduler's step function output.

    Args:
        prev_sample (`torch.FloatTensor`): computed sample (x_{t-1}) of the previous
            timestep; should be used as the next model input in the denoising loop.
        pred_original_sample (`torch.FloatTensor`, *optional*): predicted denoised
            sample (x_{0}) based on the model output from the current timestep.
    """

    # Both fields were previously named `_UpperCamelCase`, so the second silently
    # replaced the first and the keyword construction at the end of `step()` failed.
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which
    defines the cumulative product of (1 - beta) over time from t = [0, 1].

    Fixes the obfuscated version, which was named `_A` (while the scheduler calls
    `betas_for_alpha_bar`) and never bound the `betas` accumulator or the `t1`/`t2`
    interval endpoints it read inside the loop.

    Args:
        num_diffusion_timesteps (`int`): number of betas to produce.
        max_beta (`float`): maximum beta to use; values lower than 1 prevent singularities.
        alpha_transform_type (`str`): type of noise schedule for alpha_bar, `"cosine"` or `"exp"`.

    Returns:
        `torch.Tensor`: the betas used by the scheduler to step the model outputs.
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        # beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at max_beta.
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    """
    A DDPM-style scheduler used in the UnCLIP (karlo) pipelines.

    Restores the canonical wiring: the obfuscated version never bound
    `self.betas`, `self.alphas`, or any of the locals (`alpha_prod_t`,
    `pred_original_sample`, ...) the math below reads, and several methods had
    duplicate `snake_case` parameter names (SyntaxErrors).
    """

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1_000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """No-op for UnCLIP: input scaling is not required; kept for API compatibility."""
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """Set the (evenly spaced, descending) discrete timesteps used for inference."""
        self.num_inference_steps = num_inference_steps
        # Spread num_inference_steps points over [0, num_train_timesteps - 1] inclusive.
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        """Posterior variance for step t (see formulas (6)-(7) of the DDPM paper)."""
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            # Non-adjacent steps: recover the effective beta from the cumprods.
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        """
        Predict the sample at the previous timestep by reversing the SDE.

        Args:
            model_output: direct output from the learned diffusion model.
            timestep: current discrete timestep in the diffusion chain.
            sample: current instance of the sample being diffused.
            prev_timestep: optionally override the previous timestep (defaults to t - 1).
            generator: random number generator for the variance noise.
            return_dict: whether to return an `UnCLIPSchedulerOutput` or a tuple.
        """
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            # The model predicts both the mean and (log-)variance: split channels.
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                # _get_variance already returned the standard deviation here.
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        """Forward-diffuse `original_samples` to the given `timesteps` with `noise`."""
        # Make sure alphas_cumprod and timestep have the same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
| 629 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    """Checks TRANSFORMERS_OFFLINE behavior by launching subprocesses with a mocked socket.

    Restores the broken wiring: the snippet strings (`load`/`run`/`mock`), the
    command list `cmd`, and the environment dict `env` were assigned to
    throwaway locals while later lines read the real names, and every test
    shared the name `_snake_case` so only one survived.
    """

    @require_torch
    def test_offline_mode(self):
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = "\nfrom transformers import pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = "\nfrom transformers import AutoModel\n "
        run = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
| 629 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure: maps submodule name -> list of public names it provides.
# The name `_import_structure` is what `_LazyModule` consumes below; the
# per-backend lists must be attached to it (they were previously assigned to
# throwaway names and silently discarded).
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 629 |
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy-import structure: maps submodule name -> list of public names it provides.
# The name `_import_structure` is what `_LazyModule` consumes below; the
# modeling list must be attached to it (it was previously assigned to a
# throwaway name and silently discarded).
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 629 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import table for the RoBERTa model family.
# NOTE(review): this dict appears to be the original `_import_structure`, but it was
# bound to an obfuscated name — the `_LazyModule(...)` call at the bottom references
# `_import_structure`, which is therefore undefined here; confirm intended name.
SCREAMING_SNAKE_CASE__ : List[str] = {
    """configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
    """tokenization_roberta""": ["""RobertaTokenizer"""],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): each backend's export list below is bound to a throwaway name
    # instead of `_import_structure[...]`, so the lazy module never sees them.
    SCREAMING_SNAKE_CASE__ : str = ["""RobertaTokenizerFast"""]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ : int = [
        """ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """RobertaForCausalLM""",
        """RobertaForMaskedLM""",
        """RobertaForMultipleChoice""",
        """RobertaForQuestionAnswering""",
        """RobertaForSequenceClassification""",
        """RobertaForTokenClassification""",
        """RobertaModel""",
        """RobertaPreTrainedModel""",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ : str = [
        """TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFRobertaForCausalLM""",
        """TFRobertaForMaskedLM""",
        """TFRobertaForMultipleChoice""",
        """TFRobertaForQuestionAnswering""",
        """TFRobertaForSequenceClassification""",
        """TFRobertaForTokenClassification""",
        """TFRobertaMainLayer""",
        """TFRobertaModel""",
        """TFRobertaPreTrainedModel""",
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ : Tuple = [
        """FlaxRobertaForCausalLM""",
        """FlaxRobertaForMaskedLM""",
        """FlaxRobertaForMultipleChoice""",
        """FlaxRobertaForQuestionAnswering""",
        """FlaxRobertaForSequenceClassification""",
        """FlaxRobertaForTokenClassification""",
        """FlaxRobertaModel""",
        """FlaxRobertaPreTrainedModel""",
    ]
if TYPE_CHECKING:
    # Static imports for type checkers only; runtime uses the lazy module below.
    from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
    from .tokenization_roberta import RobertaTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roberta_fast import RobertaTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta import (
            ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaForCausalLM,
            RobertaForMaskedLM,
            RobertaForMultipleChoice,
            RobertaForQuestionAnswering,
            RobertaForSequenceClassification,
            RobertaForTokenClassification,
            RobertaModel,
            RobertaPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta import (
            TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaForCausalLM,
            TFRobertaForMaskedLM,
            TFRobertaForMultipleChoice,
            TFRobertaForQuestionAnswering,
            TFRobertaForSequenceClassification,
            TFRobertaForTokenClassification,
            TFRobertaMainLayer,
            TFRobertaModel,
            TFRobertaPreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta import (
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaModel,
            FlaxRobertaPreTrainedModel,
        )
else:
    import sys
    # NOTE(review): lazy module bound to a variable rather than `sys.modules[__name__]` — confirm.
    SCREAMING_SNAKE_CASE__ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 629 |
from PIL import Image
def _A ( lowerCamelCase , lowerCamelCase ):
def brightness(lowerCamelCase ) -> float:
return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError("level must be between -255.0 (black) and 255.0 (white)" )
return img.point(lowerCamelCase )
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change brightness to 100
SCREAMING_SNAKE_CASE__ : List[str] = change_brightness(img, 1_0_0)
brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
| 629 | 1 |
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _A ( lowerCamelCase ):
a__ : Tuple = []
for line in lines:
a__ : Any = re.sub(r"#.*" , "" , lowerCamelCase ) # remove comments
if line:
filtered_lines.append(lowerCamelCase )
a__ : Optional[Any] = "\n".join(lowerCamelCase )
# Make a hash from all this code
a__ : Union[str, Any] = full_str.encode("utf-8" )
return shaaaa(lowerCamelCase ).hexdigest()
# get importable module names and hash for caching
# NOTE(review): the calls below reference `_hash_python_lines`, `_EXTENSION_TO_MODULE`
# and `_MODULE_TO_EXTENSIONS`, none of which are defined above — the corresponding
# objects were bound to obfuscated names (`_A`, `SCREAMING_SNAKE_CASE__`) instead;
# confirm the intended module-level names before running this module.
SCREAMING_SNAKE_CASE__ : Dict = {
    """csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    """json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    """pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    """parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    """arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    """text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    """imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    """audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
SCREAMING_SNAKE_CASE__ : Any = {
    """.csv""": ("""csv""", {}),
    """.tsv""": ("""csv""", {"""sep""": """\t"""}),
    """.json""": ("""json""", {}),
    """.jsonl""": ("""json""", {}),
    """.parquet""": ("""parquet""", {}),
    """.arrow""": ("""arrow""", {}),
    """.txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
SCREAMING_SNAKE_CASE__ : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 629 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
# URL table of the official OpenAI Whisper checkpoints, keyed by model size.
# NOTE(review): the conversion function below reads the undefined name `_MODELS`,
# which this (obfuscated) binding presumably was — confirm the intended name.
SCREAMING_SNAKE_CASE__ : List[str] = {
    """tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
    """tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
    """base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
    """base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
    """small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
    """small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
    """medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
    """medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
    """large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
    """large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def _A ( lowerCamelCase ):
a__ : Optional[int] = ["layers", "blocks"]
for k in ignore_keys:
state_dict.pop(lowerCamelCase , lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = {
"""blocks""": """layers""",
"""mlp.0""": """fc1""",
"""mlp.2""": """fc2""",
"""mlp_ln""": """final_layer_norm""",
""".attn.query""": """.self_attn.q_proj""",
""".attn.key""": """.self_attn.k_proj""",
""".attn.value""": """.self_attn.v_proj""",
""".attn_ln""": """.self_attn_layer_norm""",
""".attn.out""": """.self_attn.out_proj""",
""".cross_attn.query""": """.encoder_attn.q_proj""",
""".cross_attn.key""": """.encoder_attn.k_proj""",
""".cross_attn.value""": """.encoder_attn.v_proj""",
""".cross_attn_ln""": """.encoder_attn_layer_norm""",
""".cross_attn.out""": """.encoder_attn.out_proj""",
"""decoder.ln.""": """decoder.layer_norm.""",
"""encoder.ln.""": """encoder.layer_norm.""",
"""token_embedding""": """embed_tokens""",
"""encoder.positional_embedding""": """encoder.embed_positions.weight""",
"""decoder.positional_embedding""": """decoder.embed_positions.weight""",
"""ln_post""": """layer_norm""",
}
def _A ( lowerCamelCase ):
a__ : Tuple = list(s_dict.keys() )
for key in keys:
a__ : Optional[Any] = key
for k, v in WHISPER_MAPPING.items():
if k in key:
a__ : Optional[int] = new_key.replace(lowerCamelCase , lowerCamelCase )
print(F"""{key} -> {new_key}""" )
a__ : Dict = s_dict.pop(lowerCamelCase )
return s_dict
def _A ( lowerCamelCase ):
a__ , a__ : Any = emb.weight.shape
a__ : Optional[Any] = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase )
a__ : Optional[Any] = emb.weight.data
return lin_layer
def _A ( url , root = "." ):
    """Download *url* into directory *root*, verify its SHA-256 checksum (taken from
    the second-to-last URL path component), and return the raw bytes. A previously
    downloaded file with a matching checksum is reused.

    Raises RuntimeError if the target path is not a regular file or if the checksum
    of the downloaded bytes does not match.

    NOTE(review): the original definition duplicated both parameter names (a
    SyntaxError) and used `hashlib.shaaaa` / several undefined locals; parameter
    names and `sha256` are restored. `root` is given a default because the call
    site in this file passes only the URL — confirm the intended default location.
    """
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    # The checkpoint URLs embed the expected sha256 as the second-to-last component.
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"""{download_target} exists and is not a regular file""")
    if os.path.isfile(download_target):
        with open(download_target, "rb") as f:
            model_bytes = f.read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""")
    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    with open(download_target, "rb") as f:
        model_bytes = f.read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.")
    return model_bytes
def _A ( checkpoint_path , pytorch_dump_folder_path ):
    """Convert an OpenAI Whisper checkpoint into a Hugging Face
    `WhisperForConditionalGeneration` and save it to *pytorch_dump_folder_path*.

    *checkpoint_path* is either a local ``.pt`` file or a model-size key into the
    module-level URL table, in which case the checkpoint is downloaded first.

    NOTE(review): the original definition duplicated both parameter names (a
    SyntaxError) and referenced helpers (`_download`, `_MODELS`,
    `remove_ignore_keys_`, `rename_keys`, `make_linear_from_emb`) that appear under
    obfuscated names elsewhere in this file — confirm the module-level names before
    running this script.
    """
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    # Keep the token-embedding weights so they can be copied into proj_out below.
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    # The MLP hidden size is not in `dims`; read it off the first decoder FFN weight.
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        # NOTE(review): kept from the original source, but "n_text_state" is a hidden
        # size, not a head count — this looks like it should be dimensions["n_text_head"].
        decoder_attention_heads=dimensions["n_text_state"],
        max_source_positions=dimensions["n_audio_ctx"],
    )
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f""" but all the following weights are missing {missing}""")
    if tie_embeds:
        # Tie the output projection to the (converted) input embeddings.
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # NOTE(review): `parser`, `args` and `convert_openai_whisper_to_tfms` are undefined
    # here — the corresponding objects were bound to obfuscated names above
    # (`SCREAMING_SNAKE_CASE__`, `_A`); confirm the intended names before running.
    SCREAMING_SNAKE_CASE__ : Tuple = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("""--checkpoint_path""", type=str, help="""Patht to the downloaded checkpoints""")
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 629 | 1 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
# Reference passage fed to the text-question-answering tool in the tests below.
SCREAMING_SNAKE_CASE__ : Dict = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class __lowerCAmelCase ( unittest.TestCase ,_UpperCamelCase ):
    """Tests for the `text-question-answering` tool, local and remote variants.

    NOTE(review): in the original, all five methods shared the obfuscated name
    `_snake_case` (later defs silently replaced earlier ones, so unittest could
    discover none), the tools were stored in locals instead of on `self`, and the
    undefined name `snake_case` stood in for the module-level text constant and for
    `True`. Conventional names and the obvious intended values are restored here.
    """

    def setUp(self):
        # Build both the local and the remote variant of the tool once per test.
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(SCREAMING_SNAKE_CASE__, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(SCREAMING_SNAKE_CASE__, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=SCREAMING_SNAKE_CASE__, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=SCREAMING_SNAKE_CASE__, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 629 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)
# Map of pretrained Informer checkpoints to their hosted config files.
# NOTE(review): rebinding the same obfuscated name shadows the logger created above.
SCREAMING_SNAKE_CASE__ : str = {
    """huggingface/informer-tourism-monthly""": (
        """https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class __lowerCAmelCase ( _UpperCamelCase ):
    """Configuration class for the Informer time-series transformer.

    Stores the hyper-parameters needed to build an Informer model: sequence lengths,
    feature counts, encoder/decoder sizes, and the Informer-specific ProbSparse
    attention settings.

    NOTE(review): class and base names (`__lowerCAmelCase`, `_UpperCamelCase`) look
    mechanically obfuscated; the base is presumably `PretrainedConfig` (imported at
    the top of this file) — confirm. The original assigned both class attributes
    below to one obfuscated name (shadowing the first) and gave every `__init__`
    parameter the same name (a SyntaxError); parameter names and order are restored
    from the default values and the attribute assignments in the body.
    """

    # PretrainedConfig conventions: the registry key and the attribute aliases.
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length=None,
        context_length=None,
        distribution_output="student_t",
        loss="nll",
        input_size=1,
        lags_sequence=None,
        scaling="mean",
        num_dynamic_real_features=0,
        num_static_real_features=0,
        num_static_categorical_features=0,
        num_time_features=0,
        cardinality=None,
        embedding_dimension=None,
        d_model=64,
        encoder_ffn_dim=32,
        decoder_ffn_dim=32,
        encoder_attention_heads=2,
        decoder_attention_heads=2,
        encoder_layers=2,
        decoder_layers=2,
        is_encoder_decoder=True,
        activation_function="gelu",
        dropout=0.05,
        encoder_layerdrop=0.1,
        decoder_layerdrop=0.1,
        attention_dropout=0.1,
        activation_dropout=0.1,
        num_parallel_samples=100,
        init_std=0.02,
        use_cache=True,
        attention_type="prob",
        sampling_factor=5,
        distil=True,
        **kwargs,
    ):
        # Time-series specific configuration.
        self.prediction_length = prediction_length
        # Default the context window to the prediction window when not given.
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality (one entry per static categorical feature)
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension (heuristic default: min(50, (card + 1) // 2))
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        # Model input size: one copy of the target per lag, plus the extra features.
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer-specific: ProbSparse attention and distillation between layers.
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Number of additional (non-lag) input features fed to the model."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 629 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
    # Type-checker-only imports, used in annotations below.
    from ... import FeatureExtractionMixin, TensorType
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)
# Map of pretrained ImageGPT checkpoints (URLs left empty in the original source).
# NOTE(review): rebinding the same obfuscated name shadows the logger created above.
SCREAMING_SNAKE_CASE__ : Any = {
    """openai/imagegpt-small""": """""",
    """openai/imagegpt-medium""": """""",
    """openai/imagegpt-large""": """""",
}
class __lowerCAmelCase ( _UpperCamelCase ):
    """Configuration for an ImageGPT model (a GPT-2-style decoder over image tokens).

    NOTE(review): class and base names look mechanically obfuscated; the base is
    presumably `PretrainedConfig`. The original assigned three distinct class
    attributes to one obfuscated name (shadowing the first two) and gave every
    `__init__` parameter the same name (a SyntaxError); names are restored from the
    attribute assignments in the body.
    """

    model_type = "imagegpt"
    # Cache entries to ignore when comparing model outputs at inference time.
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # 512 colour-cluster tokens plus one extra token
        n_positions=32 * 32,  # one position per pixel of a 32x32 image
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class __lowerCAmelCase ( _UpperCamelCase ):
    """ONNX export configuration for ImageGPT (presumably subclassing `OnnxConfig`).

    NOTE(review): the original defined both members below under the same obfuscated
    name `_snake_case` (the second silently replaced the property) and duplicated
    every parameter name (a SyntaxError); the conventional `OnnxConfig` member and
    parameter names are restored. This class also shares its obfuscated name with
    the config class above, shadowing it — confirm the intended public names.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Axis layout of the exported model inputs."""
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size=1,
        seq_length=-1,
        is_pair=False,
        framework=None,
        num_channels=3,
        image_width=32,
        image_height=32,
    ) -> Mapping[str, Any]:
        """Build dummy pixel inputs by running generated images through *preprocessor*."""
        input_images = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_images, return_tensors=framework))
        return inputs
| 629 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
# NOTE(review): two different checkpoint ids bound to the same obfuscated name — only
# the second assignment survives; the tests below presumably intended two distinct
# constants (a BART-tiny and a T5-tiny checkpoint). Confirm before relying on them.
SCREAMING_SNAKE_CASE__ : List[Any] = """sshleifer/bart-tiny-random"""
SCREAMING_SNAKE_CASE__ : Tuple = """patrickvonplaten/t5-tiny-random"""
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
    # NOTE(review): every method below shares the obfuscated name `_snake_case`, so
    # each definition replaces the previous one and unittest discovers none of them;
    # `snake_case` is also used as an (undefined) value in every body. Which of the
    # two collapsed checkpoint constants each call intended is ambiguous, so the
    # code is left untouched here — restore distinct names before running.
    @cached_property
    def _snake_case ( self ) -> List[Any]:
        """Teacher config loaded from a pretrained checkpoint (intended: AutoConfig.from_pretrained)."""
        return AutoConfig.from_pretrained(snake_case )
    def _snake_case ( self ) -> Any:
        """Student with e=1/d=1 should end up with a single hidden layer."""
        a__ , *a__ : int = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.num_hidden_layers , 1 )
    def _snake_case ( self ) -> str:
        """Student creation with an unspecified decoder depth should not raise."""
        a__ , *a__ : int = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=snake_case )
    def _snake_case ( self ) -> List[str]:
        """Encoder shrinks to 1 layer; decoder depth should match the teacher's."""
        a__ , *a__ : Any = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=snake_case )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
    def _snake_case ( self ) -> Optional[int]:
        """Both encoder and decoder shrink to a single layer."""
        a__ , *a__ : List[str] = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , 1 )
    def _snake_case ( self ) -> int:
        """Creating a student with no layer counts at all is expected to raise."""
        with self.assertRaises(snake_case ):
            create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=snake_case , d=snake_case )
| 629 | 1 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
# Emit a deprecation warning whenever this legacy module is imported.
deprecate(
    """pipelines_utils""",
    """0.22.0""",
    """Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
    standard_warn=False,
    stacklevel=3,
)
| 629 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
# NOTE(review): four different checkpoint ids bound to the same obfuscated name —
# only the last assignment ("...v1-4") survives; the comparison pipeline below
# presumably expected four distinct constants. Confirm before use.
SCREAMING_SNAKE_CASE__ : Dict = """CompVis/stable-diffusion-v1-1"""
SCREAMING_SNAKE_CASE__ : Dict = """CompVis/stable-diffusion-v1-2"""
SCREAMING_SNAKE_CASE__ : Tuple = """CompVis/stable-diffusion-v1-3"""
SCREAMING_SNAKE_CASE__ : str = """CompVis/stable-diffusion-v1-4"""
class __lowerCAmelCase ( _UpperCamelCase ):
    """Pipeline that runs the same prompt through four Stable Diffusion checkpoints
    (v1.1–v1.4) and collects one image from each for side-by-side comparison.

    NOTE(review): this class is too damaged by mechanical renaming for a safe
    rewrite and is left byte-identical with review notes only:
    - `__init__` duplicates every parameter name (`snake_case`) — a SyntaxError;
    - `super()._init_()` is a typo for `super().__init__()`;
    - all seven methods share the name `_snake_case`, so only the last survives;
    - locals `a__` swallow the created sub-pipelines (`self.pipea` is never set),
      and `self.textaimg_sda_a` is never defined anywhere.
    """
    def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case = True , ) -> Any:
        """Build the four per-checkpoint pipelines and register them as modules."""
        super()._init_()
        a__ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(snake_case )
        a__ : Optional[Any] = StableDiffusionPipeline.from_pretrained(snake_case )
        a__ : Optional[Any] = StableDiffusionPipeline.from_pretrained(snake_case )
        a__ : int = StableDiffusionPipeline(
            vae=snake_case , text_encoder=snake_case , tokenizer=snake_case , unet=snake_case , scheduler=snake_case , safety_checker=snake_case , feature_extractor=snake_case , requires_safety_checker=snake_case , )
        self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
    @property
    def _snake_case ( self ) -> Dict[str, Any]:
        """Public (non-underscore) config entries as a plain dict."""
        return {k: getattr(self , snake_case ) for k in self.config.keys() if not k.startswith("_" )}
    def _snake_case ( self , snake_case = "auto" ) -> Optional[Any]:
        """Enable sliced attention; "auto" halves the attention head dim."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            a__ : List[Any] = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(snake_case )
    def _snake_case ( self ) -> Tuple:
        """Disable attention slicing (delegates with a None-like slice size)."""
        self.enable_attention_slicing(snake_case )
    @torch.no_grad()
    def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Optional[int]:
        """Delegate generation to the v1.1 sub-pipeline."""
        return self.pipea(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
    @torch.no_grad()
    def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict:
        """Delegate generation to the v1.2 sub-pipeline."""
        return self.pipea(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
    @torch.no_grad()
    def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Union[str, Any]:
        """Delegate generation to the v1.3 sub-pipeline."""
        return self.pipea(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
    @torch.no_grad()
    def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict:
        """Delegate generation to the v1.4 sub-pipeline."""
        return self.pipea(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
    @torch.no_grad()
    def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict:
        """Run all four checkpoints on the same prompt and return the first image of each."""
        a__ : Any = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(snake_case )
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
        # Get first result from Stable Diffusion Checkpoint v1.1
        a__ : Any = self.textaimg_sda_a(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
        # Get first result from Stable Diffusion Checkpoint v1.2
        a__ : List[Any] = self.textaimg_sda_a(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
        # Get first result from Stable Diffusion Checkpoint v1.3
        a__ : Optional[Any] = self.textaimg_sda_a(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
        # Get first result from Stable Diffusion Checkpoint v1.4
        a__ : Dict = self.textaimg_sda_a(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 629 | 1 |
def _A ( weights , values , number_of_items , max_weight , index ):
    """Recursively solve the 0/1 knapsack problem.

    Args:
        weights: weight of each item.
        values: value of each item (parallel to ``weights``).
        number_of_items: how many items are available.
        max_weight: remaining knapsack capacity.
        index: index of the first item still to be considered.

    Returns:
        The maximum total value obtainable from items ``index..`` without
        exceeding ``max_weight``.

    The original signature declared every parameter under the same mangled
    name (a SyntaxError) and recursed through a module-level name that no
    longer exists; the parameter names are reconstructed from the body.
    """
    # Recurse through a local helper so the recursion does not depend on the
    # module-level binding of this function's (mangled) name.
    def _solve(remaining_weight, item_index):
        # Base case: every item has been considered.
        if item_index == number_of_items:
            return 0
        # Option 1: skip the current item.
        ans_without = _solve(remaining_weight, item_index + 1)
        ans_with = 0
        # Option 2: take the current item, if it still fits.
        if weights[item_index] <= remaining_weight:
            ans_with = values[item_index] + _solve(
                remaining_weight - weights[item_index], item_index + 1
            )
        return max(ans_without, ans_with)

    return _solve(max_weight, index)

if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 629 |
SCREAMING_SNAKE_CASE__ : float = 9.80665  # standard gravity, m/s^2

def _A ( fluid_density , volume , gravity = SCREAMING_SNAKE_CASE__ ):
    """Return the buoyant force on an object (Archimedes' principle).

    Args:
        fluid_density: density of the displaced fluid (must be > 0).
        volume: displaced volume (must be >= 0).
        gravity: gravitational acceleration (must be > 0); defaults to
            standard gravity.

    Returns:
        ``fluid_density * gravity * volume``.

    Raises:
        ValueError: if any argument is outside its physical range.

    Fixes the original signature, which declared all three parameters under
    one mangled name (a SyntaxError) and defaulted ``gravity`` to an
    undefined name ``g``.
    """
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density" )
    if volume < 0:
        raise ValueError("Impossible Object volume" )
    if gravity <= 0:
        raise ValueError("Impossible Gravity" )
    return fluid_density * gravity * volume

if __name__ == "__main__":
    import doctest
    # run doctest
    doctest.testmod()
| 629 | 1 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __lowerCAmelCase ( _UpperCamelCase ):
    """Offline-mode regression tests: each test assembles a small Python
    program as a string and runs it in a fresh subprocess, optionally
    monkey-patching ``socket.socket`` to simulate a missing network.

    NOTE(review): identifiers in this file were mangled. The base class is
    presumably ``TestCasePlus`` (it supplies ``self.get_env()``), all five
    methods share the name ``_snake_case`` (so only the last binding survives
    on the class — originally they had distinct test names), and several
    locals are assigned to the throwaway name ``a__`` while later lines read
    the original names (``load``/``run``/``mock``/``result``) — verify
    against the upstream test file before relying on this.
    """
    @require_torch
    def _snake_case ( self ) -> str:
        """Cached models must load in a subprocess with TRANSFORMERS_OFFLINE=1 and no network."""
        a__ : Tuple = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        a__ : str = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        a__ : Optional[Any] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "
        # Force fetching the files so that we can use the cache
        a__ : Tuple = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(snake_case )
        BertModel.from_pretrained(snake_case )
        BertTokenizer.from_pretrained(snake_case )
        pipeline(task="fill-mask" , model=snake_case )
        # baseline - just load from_pretrained with normal network
        a__ : Optional[int] = [sys.executable, "-c", "\n".join([load, run, mock] )]
        # should succeed
        a__ : Dict = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        # NOTE(review): originally presumably ``env["TRANSFORMERS_OFFLINE"] = "1"``;
        # as written this only binds a local.
        a__ : Tuple = "1"
        a__ : List[Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
    @require_torch
    def _snake_case ( self ) -> List[Any]:
        """Cached models must survive a flaky network (socket errors) even without offline mode."""
        a__ : Tuple = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        a__ : Optional[Any] = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        a__ : Tuple = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
        # Force fetching the files so that we can use the cache
        a__ : List[Any] = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(snake_case )
        BertModel.from_pretrained(snake_case )
        BertTokenizer.from_pretrained(snake_case )
        pipeline(task="fill-mask" , model=snake_case )
        # baseline - just load from_pretrained with normal network
        a__ : Tuple = [sys.executable, "-c", "\n".join([load, run, mock] )]
        # should succeed
        a__ : Any = self.get_env()
        a__ : int = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
    @require_torch
    def _snake_case ( self ) -> Tuple:
        """Sharded checkpoints must load offline once cached."""
        a__ : List[Any] = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
        a__ : Dict = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
        a__ : int = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        # baseline - just load from_pretrained with normal network
        a__ : List[Any] = [sys.executable, "-c", "\n".join([load, run] )]
        # should succeed
        a__ : str = self.get_env()
        a__ : List[str] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
        # next emulate no network
        a__ : List[Any] = [sys.executable, "-c", "\n".join([load, mock, run] )]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        a__ : Union[str, Any] = "1"
        a__ : Tuple = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
    @require_torch
    def _snake_case ( self ) -> Tuple:
        """`pipeline` without an explicit task must fail with a clear error in offline mode."""
        a__ : Optional[Any] = "\nfrom transformers import pipeline\n "
        a__ : int = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
        a__ : Dict = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        a__ : List[str] = self.get_env()
        a__ : Union[str, Any] = "1"
        a__ : List[str] = [sys.executable, "-c", "\n".join([load, mock, run] )]
        a__ : Any = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        # Expect a non-zero exit and the dedicated offline-mode error message.
        self.assertEqual(result.returncode , 1 , result.stderr )
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , )
    @require_torch
    def _snake_case ( self ) -> Union[str, Any]:
        """Models loaded with trust_remote_code must also work offline once cached."""
        a__ : Any = "\nfrom transformers import AutoModel\n "
        a__ : Any = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
        # baseline - just load from_pretrained with normal network
        a__ : List[str] = [sys.executable, "-c", "\n".join([load, run] )]
        # should succeed
        a__ : Optional[Any] = self.get_env()
        a__ : Union[str, Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        a__ : Dict = "1"
        a__ : Union[str, Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
| 629 |
from __future__ import annotations
from random import random
class __lowerCAmelCase :
    """A treap node: binary-search-tree ordering on ``value`` combined with
    min-heap ordering on the random priority ``prior``, which keeps the tree
    balanced in expectation.

    Fixes the original ``__init__``/``__str__``, which bound their results to
    throwaway locals (``a__``) so instances never received any attributes and
    later reads raised ``AttributeError``/``NameError``.
    """

    def __init__( self , snake_case = None ) -> None:
        # BST key for this node; ``None`` marks an "empty" sentinel value.
        self.value = snake_case
        # Random priority in [0, 1); smaller priorities sit closer to the root.
        self.prior = random()
        # Children: smaller keys to the left, larger keys to the right.
        self.left = None
        self.right = None

    def __repr__( self ) -> str:
        """Leaf -> ``'value: prior'``; internal node -> pretty-printed nested dict."""
        from pprint import pformat

        if self.left is None and self.right is None:
            return F"""'{self.value}: {self.prior:.5}'"""
        else:
            return pformat(
                {F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )

    def __str__( self ) -> str:
        """Pre-order, space-separated rendering of this subtree."""
        value = str(self.value ) + " "
        left = str(self.left or "" )
        right = str(self.right or "" )
        return value + left + right
def _A ( root , value ):
    """Split treap ``root`` by ``value``.

    Returns a pair ``(left, right)`` where every key in ``left`` is
    <= ``value`` and every key in ``right`` is > ``value``. The input treap
    is restructured in place.

    Fixes the original, whose duplicated parameter names were a SyntaxError
    and whose tuple unpacks discarded their first element (``a__, a__``);
    behaviour reconstructed from the body's reads of ``root``/``value``.
    """
    # Recurse through a local helper so the recursion keeps working no matter
    # what the module-level (mangled) name of this function is rebound to.
    def _split(node):
        if node is None:  # None tree is split into 2 Nones
            return None, None
        if node.value is None:  # sentinel "empty" node
            return None, None
        if value < node.value:
            # This node (and its right subtree) belongs to the right part.
            left, node.left = _split(node.left)
            return left, node
        # This node (and its left subtree) belongs to the left part.
        node.right, right = _split(node.right)
        return node, right

    return _split(root)
def _A ( left , right ):
    """Merge two treaps into one.

    Precondition: every key in ``left`` is <= every key in ``right``.
    The node with the smaller priority becomes the root (min-heap invariant);
    the treaps are restructured in place.

    Fixes the original, whose duplicated parameter names were a SyntaxError
    and whose assignments bound throwaway locals instead of child links.
    """
    # Recurse through a local helper so the recursion keeps working no matter
    # what the module-level (mangled) name of this function is rebound to.
    def _merge(left_node, right_node):
        if (not left_node) or (not right_node):  # If one node is None, return the other
            return left_node or right_node
        if left_node.prior < right_node.prior:
            # Left root wins; fold the rest into its right spine.
            left_node.right = _merge(left_node.right, right_node)
            return left_node
        # Right root wins; fold the rest into its left spine.
        right_node.left = _merge(left_node, right_node.left)
        return right_node

    return _merge(left, right)
def _A ( lowerCamelCase , lowerCamelCase ):
    # Insert a value into the treap: wrap it in a node, split the treap
    # around the value, then merge the three pieces back together.
    # NOTE(review): mangled — the duplicated parameter names are a
    # SyntaxError, and ``Node``/``split``/``merge`` are not bound under those
    # names in this file (the defs above were renamed). Originally presumably:
    # node = Node(value); left, right = split(root, value);
    # return merge(merge(left, node), right). Verify upstream.
    a__ : Any = Node(lowerCamelCase )
    a__ , a__ : List[str] = split(lowerCamelCase , lowerCamelCase )
    return merge(merge(lowerCamelCase , lowerCamelCase ) , lowerCamelCase )
def _A ( lowerCamelCase , lowerCamelCase ):
    # Erase all nodes holding ``value``: split out the slice (value-1, value]
    # and merge the two surviving halves back together.
    # NOTE(review): mangled — both unpack targets are ``a__`` (the first
    # result is discarded) and ``split``/``merge``/``value`` are unbound
    # here. Originally presumably: left, right = split(root, value - 1);
    # _, right = split(right, value); return merge(left, right). Verify.
    a__ , a__ : Optional[int] = split(lowerCamelCase , value - 1 )
    a__ , a__ : Tuple = split(lowerCamelCase , lowerCamelCase )
    return merge(lowerCamelCase , lowerCamelCase )
def _A ( root ):
    """Print the treap's values in ascending (inorder) order, comma-separated,
    with no trailing newline.

    Fixes the original, whose parameter name was mangled while the body read
    ``root`` (NameError) and which recursed through the undefined name
    ``inorder``.
    """
    # Recurse through a local helper so the recursion keeps working no matter
    # what the module-level (mangled) name of this function is rebound to.
    def _walk(node):
        if not node:
            return
        _walk(node.left)
        print(node.value , end="," )
        _walk(node.right)

    _walk(root)
def _A ( lowerCamelCase , lowerCamelCase ):
    # Apply a whitespace-separated command string to the treap:
    # "+x" inserts x, "-x" erases x, anything else prints a warning; the
    # (possibly new) root is returned.
    # NOTE(review): mangled — the duplicated parameter names are a
    # SyntaxError; the body reads ``args`` and returns ``root``, neither of
    # which is bound here (originally presumably (root, args)), and
    # ``insert``/``erase`` are not defined under those names in this file.
    for arg in args.split():
        if arg[0] == "+":
            a__ : int = insert(lowerCamelCase , int(arg[1:] ) )
        elif arg[0] == "-":
            a__ : Union[str, Any] = erase(lowerCamelCase , int(arg[1:] ) )
        else:
            print("Unknown command" )
    return root
def _A ( ):
    # Interactive driver: read command strings from stdin, update the treap,
    # and print its pre-order rendering until the user enters "q".
    # NOTE(review): mangled locals — the loop reads ``args`` and calls
    # ``interact_treap`` (the def above was renamed ``_A``), so as written
    # this raises NameError; originally ``root``/``args`` were the bound
    # names. Verify against the upstream treap module.
    a__ : List[str] = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. " )
    a__ : int = input()
    while args != "q":
        a__ : Union[str, Any] = interact_treap(lowerCamelCase , lowerCamelCase )
        print(lowerCamelCase )
        a__ : Optional[Any] = input()
    print("good by!" )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
| 629 | 1 |
def _A ( n = 1000 ):
    """Project Euler 9 (generalised): for perimeter ``n``, return the largest
    product ``a * b * c`` over Pythagorean triplets with ``a + b + c == n``,
    or ``-1`` if no such triplet exists.

    >>> _A(12)
    60

    Fixes the original, whose parameter name was mangled while the body read
    ``n`` (NameError) and whose ``__main__`` guard called the undefined name
    ``solution``.
    """
    product = -1
    candidate = 0
    for a in range(1 , n // 3 ):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product

if __name__ == "__main__":
    print(f'{_A() = }')
| 629 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __lowerCAmelCase ( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
    """Fast CPU tests for ``StableUnCLIPPipeline`` built from tiny randomly
    initialised components.

    NOTE(review): identifiers were mangled — the mixin bases are presumably
    ``PipelineLatentTesterMixin``/``PipelineKarrasSchedulerTesterMixin``/
    ``PipelineTesterMixin`` (imported above), and the class attributes below
    all share the name ``_UpperCamelCase`` so only the last binding survives;
    originally they were ``pipeline_class``/``params``/``batch_params``/
    ``image_params``/``image_latents_params``/``test_xformers_attention``.
    """
    _UpperCamelCase : Optional[int] = StableUnCLIPPipeline
    _UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_PARAMS
    _UpperCamelCase : str = TEXT_TO_IMAGE_BATCH_PARAMS
    _UpperCamelCase : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
    _UpperCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    _UpperCamelCase : Any = False
    def _snake_case ( self ) -> List[str]:
        """Build the tiny prior + denoiser component dict used by the fast tests."""
        # NOTE(review): the next line reads ``embedder_hidden_size`` before it
        # is defined (and a later line reads ``embedder_projection_dim``);
        # originally both were assigned 32 here — verify upstream.
        a__ : Any = 32
        a__ : int = embedder_hidden_size
        # prior components
        torch.manual_seed(0 )
        a__ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        torch.manual_seed(0 )
        a__ : Optional[Any] = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case , projection_dim=snake_case , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
        torch.manual_seed(0 )
        a__ : int = PriorTransformer(
            num_attention_heads=2 , attention_head_dim=12 , embedding_dim=snake_case , num_layers=1 , )
        torch.manual_seed(0 )
        a__ : str = DDPMScheduler(
            variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_000 , clip_sample=snake_case , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
        # regular denoising components
        torch.manual_seed(0 )
        a__ : Any = StableUnCLIPImageNormalizer(embedding_dim=snake_case )
        a__ : int = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
        torch.manual_seed(0 )
        a__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        torch.manual_seed(0 )
        a__ : Union[str, Any] = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
        torch.manual_seed(0 )
        a__ : Any = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=snake_case , layers_per_block=1 , upcast_attention=snake_case , use_linear_projection=snake_case , )
        torch.manual_seed(0 )
        a__ : Tuple = DDIMScheduler(
            beta_schedule="scaled_linear" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=snake_case , steps_offset=1 , )
        torch.manual_seed(0 )
        a__ : Optional[int] = AutoencoderKL()
        a__ : Any = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
        return components
    def _snake_case ( self , snake_case , snake_case=0 ) -> Dict:
        """Return deterministic pipeline kwargs for the given device and seed."""
        if str(snake_case ).startswith("mps" ):
            a__ : Union[str, Any] = torch.manual_seed(snake_case )
        else:
            a__ : List[str] = torch.Generator(device=snake_case ).manual_seed(snake_case )
        a__ : Any = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def _snake_case ( self ) -> List[str]:
        """Attention slicing must not change results (exact match only on CPU)."""
        a__ : Dict = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=snake_case )
    def _snake_case ( self ) -> int:
        """Batched and single inference must agree (exact match only on cpu/mps)."""
        a__ : int = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=snake_case )
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
    """Slow GPU integration tests for the full ``stable-unclip-2-1-l``
    checkpoint.

    NOTE(review): identifiers were mangled — all three methods share the
    name ``_snake_case`` (only the last binding survives on the class), and
    several locals/arguments read names (``pipe``/``output``/``image``/
    ``mem_bytes``) that the mangled ``a__``/``snake_case`` assignments no
    longer bind. Verify against the upstream diffusers test file.
    """
    def _snake_case ( self ) -> List[str]:
        """Free python + CUDA memory after each test (originally tearDown)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def _snake_case ( self ) -> Tuple:
        """Generated image must match the reference npy within the mean-pixel tolerance."""
        a__ : int = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
        a__ : int = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
        pipe.to(snake_case )
        pipe.set_progress_bar_config(disable=snake_case )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        a__ : Dict = torch.Generator(device="cpu" ).manual_seed(0 )
        a__ : Dict = pipe("anime turle" , generator=snake_case , output_type="np" )
        a__ : List[str] = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(snake_case , snake_case )
    def _snake_case ( self ) -> Tuple:
        """With slicing + sequential offload, peak GPU memory must stay under 7 GB."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        a__ : str = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
        a__ : Union[str, Any] = pipe.to(snake_case )
        pipe.set_progress_bar_config(disable=snake_case )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        a__ : Union[str, Any] = pipe(
            "anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
        a__ : List[Any] = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 629 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class __lowerCAmelCase ( unittest.TestCase ):
    """Tests for the framework-agnostic tensor helpers in ``transformers.utils``
    (``flatten_dict``/``transpose``/``reshape``/``squeeze``/``expand_dims``):
    each op is checked against its numpy equivalent for numpy, torch,
    tensorflow and flax inputs.

    NOTE(review): identifiers were mangled — every method shares the name
    ``_snake_case`` (only the last binding survives on the class; originally
    each had a distinct ``test_*`` name), and ``snake_case`` in the assertion
    calls stands in for the locals assigned to ``a__`` above them.
    """
    def _snake_case ( self ) -> Dict:
        """flatten_dict must flatten nested dicts into dotted keys."""
        a__ : Optional[int] = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        a__ : List[str] = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(snake_case ) , snake_case )
    def _snake_case ( self ) -> Optional[int]:
        """transpose on numpy arrays must match ndarray.transpose."""
        a__ : Optional[Any] = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(transpose(snake_case ) , x.transpose() ) )
        a__ : Union[str, Any] = np.random.randn(3 , 4 , 5 )
        self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
    @require_torch
    def _snake_case ( self ) -> Union[str, Any]:
        """transpose on torch tensors must match the numpy result."""
        a__ : List[Any] = np.random.randn(3 , 4 )
        a__ : Optional[Any] = torch.tensor(snake_case )
        self.assertTrue(np.allclose(transpose(snake_case ) , transpose(snake_case ).numpy() ) )
        a__ : Optional[int] = np.random.randn(3 , 4 , 5 )
        a__ : int = torch.tensor(snake_case )
        self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , transpose(snake_case , axes=(1, 2, 0) ).numpy() ) )
    @require_tf
    def _snake_case ( self ) -> Optional[Any]:
        """transpose on tensorflow tensors must match the numpy result."""
        a__ : Any = np.random.randn(3 , 4 )
        a__ : int = tf.constant(snake_case )
        self.assertTrue(np.allclose(transpose(snake_case ) , transpose(snake_case ).numpy() ) )
        a__ : Union[str, Any] = np.random.randn(3 , 4 , 5 )
        a__ : Union[str, Any] = tf.constant(snake_case )
        self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , transpose(snake_case , axes=(1, 2, 0) ).numpy() ) )
    @require_flax
    def _snake_case ( self ) -> int:
        """transpose on jax arrays must match the numpy result."""
        a__ : Optional[int] = np.random.randn(3 , 4 )
        a__ : Optional[Any] = jnp.array(snake_case )
        self.assertTrue(np.allclose(transpose(snake_case ) , np.asarray(transpose(snake_case ) ) ) )
        a__ : Tuple = np.random.randn(3 , 4 , 5 )
        a__ : Optional[int] = jnp.array(snake_case )
        self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , np.asarray(transpose(snake_case , axes=(1, 2, 0) ) ) ) )
    def _snake_case ( self ) -> Tuple:
        """reshape on numpy arrays must match np.reshape."""
        a__ : Optional[Any] = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , np.reshape(snake_case , (4, 3) ) ) )
        a__ : Tuple = np.random.randn(3 , 4 , 5 )
        self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , np.reshape(snake_case , (12, 5) ) ) )
    @require_torch
    def _snake_case ( self ) -> List[Any]:
        """reshape on torch tensors must match the numpy result."""
        a__ : List[Any] = np.random.randn(3 , 4 )
        a__ : Tuple = torch.tensor(snake_case )
        self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , reshape(snake_case , (4, 3) ).numpy() ) )
        a__ : Dict = np.random.randn(3 , 4 , 5 )
        a__ : Optional[int] = torch.tensor(snake_case )
        self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , reshape(snake_case , (12, 5) ).numpy() ) )
    @require_tf
    def _snake_case ( self ) -> Tuple:
        """reshape on tensorflow tensors must match the numpy result."""
        a__ : Optional[Any] = np.random.randn(3 , 4 )
        a__ : Union[str, Any] = tf.constant(snake_case )
        self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , reshape(snake_case , (4, 3) ).numpy() ) )
        a__ : str = np.random.randn(3 , 4 , 5 )
        a__ : Optional[Any] = tf.constant(snake_case )
        self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , reshape(snake_case , (12, 5) ).numpy() ) )
    @require_flax
    def _snake_case ( self ) -> str:
        """reshape on jax arrays must match the numpy result."""
        a__ : Any = np.random.randn(3 , 4 )
        a__ : Tuple = jnp.array(snake_case )
        self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , np.asarray(reshape(snake_case , (4, 3) ) ) ) )
        a__ : Optional[int] = np.random.randn(3 , 4 , 5 )
        a__ : Tuple = jnp.array(snake_case )
        self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , np.asarray(reshape(snake_case , (12, 5) ) ) ) )
    def _snake_case ( self ) -> Any:
        """squeeze on numpy arrays must match np.squeeze."""
        a__ : Optional[Any] = np.random.randn(1 , 3 , 4 )
        self.assertTrue(np.allclose(squeeze(snake_case ) , np.squeeze(snake_case ) ) )
        a__ : Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
        self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , np.squeeze(snake_case , axis=2 ) ) )
    @require_torch
    def _snake_case ( self ) -> int:
        """squeeze on torch tensors must match the numpy result."""
        a__ : List[str] = np.random.randn(1 , 3 , 4 )
        a__ : Optional[int] = torch.tensor(snake_case )
        self.assertTrue(np.allclose(squeeze(snake_case ) , squeeze(snake_case ).numpy() ) )
        a__ : str = np.random.randn(1 , 4 , 1 , 5 )
        a__ : Union[str, Any] = torch.tensor(snake_case )
        self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , squeeze(snake_case , axis=2 ).numpy() ) )
    @require_tf
    def _snake_case ( self ) -> Optional[int]:
        """squeeze on tensorflow tensors must match the numpy result."""
        a__ : int = np.random.randn(1 , 3 , 4 )
        a__ : str = tf.constant(snake_case )
        self.assertTrue(np.allclose(squeeze(snake_case ) , squeeze(snake_case ).numpy() ) )
        a__ : Tuple = np.random.randn(1 , 4 , 1 , 5 )
        a__ : Union[str, Any] = tf.constant(snake_case )
        self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , squeeze(snake_case , axis=2 ).numpy() ) )
    @require_flax
    def _snake_case ( self ) -> str:
        """squeeze on jax arrays must match the numpy result."""
        a__ : Tuple = np.random.randn(1 , 3 , 4 )
        a__ : List[str] = jnp.array(snake_case )
        self.assertTrue(np.allclose(squeeze(snake_case ) , np.asarray(squeeze(snake_case ) ) ) )
        a__ : Any = np.random.randn(1 , 4 , 1 , 5 )
        a__ : List[str] = jnp.array(snake_case )
        self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , np.asarray(squeeze(snake_case , axis=2 ) ) ) )
    def _snake_case ( self ) -> int:
        """expand_dims on numpy arrays must match np.expand_dims."""
        a__ : Tuple = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , np.expand_dims(snake_case , axis=1 ) ) )
    @require_torch
    def _snake_case ( self ) -> Dict:
        """expand_dims on torch tensors must match the numpy result."""
        a__ : Optional[Any] = np.random.randn(3 , 4 )
        a__ : List[Any] = torch.tensor(snake_case )
        self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , expand_dims(snake_case , axis=1 ).numpy() ) )
    @require_tf
    def _snake_case ( self ) -> Dict:
        """expand_dims on tensorflow tensors must match the numpy result."""
        a__ : Optional[Any] = np.random.randn(3 , 4 )
        a__ : int = tf.constant(snake_case )
        self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , expand_dims(snake_case , axis=1 ).numpy() ) )
    @require_flax
    def _snake_case ( self ) -> Optional[Any]:
        """expand_dims on jax arrays must match the numpy result."""
        a__ : Union[str, Any] = np.random.randn(3 , 4 )
        a__ : Optional[Any] = jnp.array(snake_case )
        self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , np.asarray(expand_dims(snake_case , axis=1 ) ) ) )
| 629 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ : str = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Tuple = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[Any] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Any = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 629 | 1 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def _A ( ):
a__ , a__ : List[str] = 9, 14 # noqa: F841
a__ : Tuple = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
a__ : Dict = defaultdict(lowerCamelCase )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
a__ : List[str] = mst(lowerCamelCase )
a__ : Dict = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
a__ : str = tuple(answer[:2] )
a__ : Tuple = tuple(edge[::-1] )
assert edge in result or reverse in result
| 629 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def _A ( number_of_simulations ):
    """Estimate pi with a Monte Carlo simulation.

    Throws ``number_of_simulations`` uniform darts at the square
    [-1, 1] x [-1, 1]; the fraction landing inside the unit circle
    approximates pi/4. Prints the estimate, the reference value, and the
    absolute error; returns None.

    Fixes the original, whose inner helper declared both parameters under the
    same mangled name (a SyntaxError) while its body read ``x``/``y``.
    """
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x, y) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2) )
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
        for _ in range(number_of_simulations ) )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(F"""The estimated value of pi is {pi_estimate}""" )
    print(F"""The numpy value of pi is {pi}""" )
    print(F"""The total error is {abs(pi - pi_estimate )}""" )
def _A ( iterations , function_to_integrate , min_value = 0.0 , max_value = 1.0 ):
    """Monte Carlo estimate of the integral of ``function_to_integrate`` over
    ``[min_value, max_value]``.

    Draws ``iterations`` uniform samples and returns
    ``mean(f(U)) * (max_value - min_value)``.

    Fixes the original signature, which declared all four parameters under
    one mangled name (a SyntaxError); names reconstructed from the body.
    """
    return mean(
        function_to_integrate(uniform(min_value , max_value ) ) for _ in range(iterations ) ) * (max_value - min_value)
def _A ( lowerCamelCase , lowerCamelCase = 0.0 , lowerCamelCase = 1.0 ):
    # Sanity check: estimate the area under y = x on [min_value, max_value]
    # and print it next to the closed form (b**2 - a**2) / 2.
    # NOTE(review): mangled — the duplicated parameter names are a
    # SyntaxError (originally (iterations, min_value, max_value)), the inner
    # helper's body reads ``x`` which its mangled parameter no longer binds,
    # and ``area_under_curve_estimator`` is not defined under that name here
    # (the def above was renamed ``_A``). Verify upstream.
    def identity_function(lowerCamelCase ) -> float:
        return x
    a__ : int = area_under_curve_estimator(
        lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
    a__ : int = (max_value * max_value - min_value * min_value) / 2
    print("******************" )
    print(F"""Estimating area under y=x where x varies from {min_value} to {max_value}""" )
    print(F"""Estimated value is {estimated_value}""" )
    print(F"""Expected value is {expected_value}""" )
    print(F"""Total error is {abs(estimated_value - expected_value )}""" )
    print("******************" )
def _A ( lowerCamelCase ):
    # Sanity check: integrate sqrt(4 - x**2) over [0, 2] — a quarter circle
    # of radius 2 whose area is pi — and print the estimate next to math.pi.
    # NOTE(review): mangled — the inner helper's body reads ``x`` which its
    # parameter no longer binds, and ``area_under_curve_estimator`` is not
    # defined under that name here (the def above was renamed ``_A``).
    def function_to_integrate(lowerCamelCase ) -> float:
        return sqrt(4.0 - x * x )
    a__ : int = area_under_curve_estimator(
        lowerCamelCase , lowerCamelCase , 0.0 , 2.0 )
    print("******************" )
    print("Estimating pi using area_under_curve_estimator" )
    print(F"""Estimated value is {estimated_value}""" )
    print(F"""Expected value is {pi}""" )
    print(F"""Total error is {abs(estimated_value - pi )}""" )
    print("******************" )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 629 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import table for the (deprecated) VAN model: maps submodule name to
# the public names it provides; populated conditionally below.
SCREAMING_SNAKE_CASE__ : List[str] = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration symbols.
    pass
else:
    # NOTE(review): the variable names here were mangled — originally this
    # extended ``_import_structure`` (referenced in the _LazyModule call
    # below) rather than rebinding a fresh name. Verify upstream.
    SCREAMING_SNAKE_CASE__ : Optional[int] = [
        """VAN_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """VanForImageClassification""",
        """VanModel""",
        """VanPreTrainedModel""",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the _LazyModule
    # below defers them until first attribute access.
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )
else:
    import sys
    # Replace this module with a lazy proxy that imports submodules on demand.
    SCREAMING_SNAKE_CASE__ : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 629 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    """Map one key of an original EfficientFormer checkpoint to its HF name.

    Args:
        old_name: parameter name from the original checkpoint.
        num_meta4D_last_stage: number of meta-4D blocks in the last stage;
            blocks past it in the last stage are meta-3D blocks.

    Returns:
        The corresponding HuggingFace parameter name.

    Bug fix: the original bound every intermediate result to the throwaway
    name ``a__`` while later lines read ``new_name`` / ``trimmed_name`` /
    ``match`` / ``layer_index`` (NameError); the real bindings are restored.
    """
    new_name = old_name

    if "patch_embed" in old_name:
        # Stem: conv/bn/conv/bn indexed 0/1/3/4 in the original.
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        # Keys like "network.<stage>.<block>...."; block index may be 2 digits.
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            # Intermediate stages contain only meta-4D blocks.
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace(
                "network", match[0] + ".meta4D_layers.blocks." + match[2:-1]
            )
            new_name = "intermediate_stages." + trimmed_name
        else:
            # Last stage: first meta-4D blocks, then meta-3D blocks.
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
                # Meta-3D blocks use layernorm + linear layers.
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")
            new_name = "last_stage." + trimmed_name
    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    # Meta-4D blocks use convolutions + batchnorms.
    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name
    return new_name
def _A ( lowerCamelCase , lowerCamelCase ):
    # Rebuilds the checkpoint dict in place by popping every key.
    # NOTE(review): this block was damaged by a mechanical rename — `val` is
    # read but never assigned, and the popped value (bound to the throwaway
    # `a__`) is never re-inserted under a renamed key (presumably via the key
    # renaming helper above). Restore from the upstream conversion script
    # before running.
    for key in checkpoint.copy().keys():
        a__ : Optional[Any] = checkpoint.pop(lowerCamelCase )
        a__ : Dict = val
    return checkpoint
def prepare_img():
    """Download the standard COCO val2017 test image used to sanity-check conversions.

    Returns:
        A ``PIL.Image.Image`` decoded straight from the HTTP response.

    Bug fix: ``stream=lowerCamelCase`` referenced an undefined name; it must be
    ``stream=True`` so ``response.raw`` is a readable stream for PIL.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
    """Convert an original EfficientFormer checkpoint to HF format and verify it.

    NOTE(review): this block was damaged by a mechanical rename — most results
    are bound to the throwaway name ``a__`` while later lines read the intended
    names (``checkpoint_path``, ``config``, ``model``, ``processor`` …), and
    several call arguments became the undefined ``lowerCamelCase``. Comments
    below describe the intended flow only; restore the real names from the
    upstream conversion script before running.
    """
    # Load the raw state dict ("model" entry) and build the target HF model.
    a__ : List[str] = torch.load(lowerCamelCase , map_location="cpu" )["model"]
    a__ : str = EfficientFormerConfig.from_json_file(lowerCamelCase )
    a__ : int = EfficientFormerForImageClassificationWithTeacher(lowerCamelCase )
    # Model variant name is derived from the checkpoint filename.
    a__ : Optional[Any] = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
    # Index of the first meta-3D block within the last stage.
    a__ : Tuple = config.depths[-1] - config.num_metaad_blocks + 1
    a__ : Union[str, Any] = convert_torch_checkpoint(lowerCamelCase , lowerCamelCase )
    model.load_state_dict(lowerCamelCase )
    model.eval()
    a__ : Dict = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    # prepare image
    a__ : str = prepare_img()
    a__ : Dict = 256
    a__ : Union[str, Any] = 224
    a__ : List[str] = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size} , crop_size={"height": crop_size, "width": crop_size} , resample=pillow_resamplings["bicubic"] , )
    a__ : List[str] = processor(images=lowerCamelCase , return_tensors="pt" ).pixel_values
    # original processing pipeline (used to check the HF processor matches it)
    a__ : List[str] = Compose(
        [
            Resize(lowerCamelCase , interpolation=pillow_resamplings["bicubic"] ),
            CenterCrop(lowerCamelCase ),
            ToTensor(),
            Normalize(lowerCamelCase , lowerCamelCase ),
        ] )
    a__ : List[Any] = image_transforms(lowerCamelCase ).unsqueeze(0 )
    assert torch.allclose(lowerCamelCase , lowerCamelCase )
    a__ : Optional[int] = model(lowerCamelCase )
    a__ : Any = outputs.logits
    a__ : Optional[Any] = (1, 1000)
    # Spot-check the first 10 ImageNet logits against reference values per variant.
    if "l1" in model_name:
        a__ : Tuple = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
        assert torch.allclose(logits[0, :10] , lowerCamelCase , atol=1E-3 )
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        a__ : int = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
        assert torch.allclose(logits[0, :10] , lowerCamelCase , atol=1E-3 )
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        a__ : Optional[Any] = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
    # Save Checkpoints
    Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
    model.save_pretrained(lowerCamelCase )
    print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
    processor.save_pretrained(lowerCamelCase )
    print(F"""Processor successfuly saved at {pytorch_dump_path}""" )
    if push_to_hub:
        print("Pushing model to the hub..." )
        model.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add model" , use_temp_dir=lowerCamelCase , )
        processor.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add image processor" , use_temp_dir=lowerCamelCase , )
if __name__ == "__main__":
    # CLI entry point: wires checkpoint/config/output paths into the converter.
    # NOTE(review): mechanical-rename damage — the parser is bound to
    # SCREAMING_SNAKE_CASE__ but used as `parser` below, and the parsed args
    # are read as `args`; restore the real names before running.
    SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--pytorch_model_path""",
        default=None,
        type=str,
        required=True,
        help="""Path to EfficientFormer pytorch checkpoint.""",
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The json file for EfficientFormer model config.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    # --push_to_hub / --no-push_to_hub toggle the same flag; default is True.
    parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    parser.add_argument(
        """--no-push_to_hub""",
        dest="""push_to_hub""",
        action="""store_false""",
        help="""Do not push model and image processor to the hub""",
    )
    parser.set_defaults(push_to_hub=True)
    SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
| 629 | 1 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
def rename_keys(state_dict):
    """Map original GLPN checkpoint keys to HuggingFace GLPN parameter names.

    Args:
        state_dict: the original (module.*) checkpoint mapping.

    Returns:
        A new ``OrderedDict`` with HF-style keys, values unchanged.

    Bug fix: the original bound every renamed key to the throwaway name
    ``a__`` and returned the never-assigned ``new_state_dict``; the key is
    now threaded through each replacement and inserted into the result.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    """Split each fused key/value projection into separate ``key`` and ``value`` entries, in place.

    The original GLPN implementation stores K and V as one stacked ``kv``
    matrix; HF expects them as separate ``key``/``value`` projections.

    Bug fix: the original bound the split tensors to the throwaway name
    ``a__`` instead of writing them back into ``state_dict``.
    """
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    """Download the standard COCO val2017 test image used to sanity-check the conversion.

    Returns:
        A ``PIL.Image.Image`` decoded straight from the HTTP response.

    Bug fix: ``stream=lowerCamelCase`` referenced an undefined name; it must be
    ``stream=True`` so ``response.raw`` is a readable stream for PIL.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase=False , lowerCamelCase=None ):
    """Convert an original GLPN checkpoint into a HF GLPNForDepthEstimation model.

    NOTE(review): this block was damaged by a mechanical rename — results are
    bound to the throwaway ``a__`` while later lines read the intended names
    (``model``, ``outputs``, ``predicted_depth`` …), and many call arguments
    became the undefined ``lowerCamelCase``. Comments describe the intended
    flow only; restore the real names from the upstream script before running.
    """
    # Hard-coded GLPN architecture (same backbone for nyu/kitti checkpoints).
    a__ : int = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
    # load image processor (only resize + rescale)
    a__ : Union[str, Any] = GLPNImageProcessor()
    # prepare image
    a__ : List[str] = prepare_img()
    a__ : List[Any] = image_processor(images=lowerCamelCase , return_tensors="pt" ).pixel_values
    logger.info("Converting model..." )
    # load original state dict
    a__ : List[str] = torch.load(lowerCamelCase , map_location=torch.device("cpu" ) )
    # rename keys
    a__ : Union[str, Any] = rename_keys(lowerCamelCase )
    # key and value matrices need special treatment
    read_in_k_v(lowerCamelCase , lowerCamelCase )
    # create HuggingFace model and load state dict
    a__ : List[str] = GLPNForDepthEstimation(lowerCamelCase )
    model.load_state_dict(lowerCamelCase )
    model.eval()
    # forward pass
    a__ : Optional[int] = model(lowerCamelCase )
    a__ : List[Any] = outputs.predicted_depth
    # verify output against reference depth patches per released variant
    if model_name is not None:
        if "nyu" in model_name:
            a__ : Dict = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
        elif "kitti" in model_name:
            a__ : int = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
        else:
            raise ValueError(F"""Unknown model name: {model_name}""" )
        a__ : Union[str, Any] = torch.Size([1, 480, 640] )
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3] , lowerCamelCase , atol=1E-4 )
        print("Looks ok!" )
    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub..." )
        model.push_to_hub(
            repo_path_or_name=Path(lowerCamelCase , lowerCamelCase ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=lowerCamelCase , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(lowerCamelCase , lowerCamelCase ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=lowerCamelCase , )
if __name__ == "__main__":
    # CLI entry point for the GLPN conversion.
    # NOTE(review): mechanical-rename damage — the parser and parsed args are
    # bound to SCREAMING_SNAKE_CASE__ but read back as `parser` / `args`;
    # restore the real names before running.
    SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser()
    parser.add_argument(
        """--checkpoint_path""",
        default=None,
        type=str,
        help="""Path to the original PyTorch checkpoint (.pth file).""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
    )
    parser.add_argument(
        """--model_name""",
        default="""glpn-kitti""",
        type=str,
        help="""Name of the model in case you're pushing to the hub.""",
    )
    SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 629 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
# Names of the serialized tokenizer assets.
# Bug fix: all four constants below were bound to the same throwaway name
# while the tokenizer class reads VOCAB_FILES_NAMES etc.; the real names are
# restored.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Download URLs for each released checkpoint's vocab / tokenizer files.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input length (positional embeddings) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

# Per-checkpoint tokenizer init overrides.
PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    r"""Fast (Rust-backed) LXMERT tokenizer — a BERT-style WordPiece tokenizer.

    Bug fix: the original class was broken by a mechanical rename — every
    class attribute shared one name, ``__init__`` declared many parameters all
    named ``snake_case`` (a SyntaxError), and the three public methods were all
    named ``_snake_case`` so they shadowed each other. The canonical
    PreTrainedTokenizerFast interface is restored.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Keep the backend Rust normalizer in sync with the requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Return segment ids: 0s for the first sequence (incl. specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Serialize the backend tokenizer's vocabulary files to ``save_directory``."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 629 | 1 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
# U-Net hyper-parameters for the small 32x32 test checkpoints.
# Bug fix: all six config dicts below were bound to throwaway names while the
# __main__ block reads TEST_UNET_CONFIG, IMAGENET_64_UNET_CONFIG, etc.; the
# real names are restored.
TEST_UNET_CONFIG = {
    "sample_size": 32,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": 1000,
    "block_out_channels": [32, 64],
    "attention_head_dim": 8,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

# U-Net hyper-parameters for the class-conditional ImageNet-64 checkpoints.
IMAGENET_64_UNET_CONFIG = {
    "sample_size": 64,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 3,
    "num_class_embeds": 1000,
    "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

# U-Net hyper-parameters for the unconditional LSUN-256 checkpoints.
LSUN_256_UNET_CONFIG = {
    "sample_size": 256,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": None,
    "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "default",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

# Scheduler settings for consistency-distillation (cd) checkpoints.
CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

# Scheduler settings for consistency-training (ct) ImageNet-64 checkpoints.
CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

# Scheduler settings for consistency-training (ct) LSUN-256 checkpoints.
CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
def strabool(v):
    """argparse-friendly bool parser: accepts real bools and common yes/no strings.

    Args:
        v: a ``bool`` (returned unchanged) or a string such as "yes"/"no".

    Raises:
        argparse.ArgumentTypeError: if the string is not a recognized boolean.

    Bug fix: the original tested ``isinstance(v, v)`` (always a TypeError for
    strings) instead of ``isinstance(v, bool)``, and was named so the
    ``strabool(...)`` call in ``__main__`` could not resolve it.
    """
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    """Copy one original resnet block's parameters into diffusers' naming scheme.

    Maps ``in_layers``/``emb_layers``/``out_layers`` (and optionally
    ``skip_connection``) under ``old_prefix`` to ``norm1``/``conv1``/
    ``time_emb_proj``/``norm2``/``conv2``/``conv_shortcut`` under ``new_prefix``.

    Bug fix: the original bound every tensor to the throwaway name ``a__``
    instead of inserting it into ``new_checkpoint``.
    """
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        # Blocks that change channel count carry a 1x1 skip convolution.
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim=None):
    """Copy one original attention block's parameters into diffusers' naming scheme.

    The original stores Q/K/V as one stacked 1x1-conv ``qkv`` tensor; it is
    chunked into three and the trailing singleton conv dims are squeezed so
    diffusers' linear ``to_q``/``to_k``/``to_v`` layers can load them.

    Bug fix: the original bound every tensor to the throwaway name ``a__``
    instead of inserting it into ``new_checkpoint``.
    """
    # Split the fused qkv projection (stacked along the output-channel dim).
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    # Drop the 1x1 conv spatial dims so the tensors fit linear layers.
    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint
def _A ( lowerCamelCase , lowerCamelCase ):
    """Translate an original consistency-models U-Net checkpoint into diffusers' UNet2D layout.

    NOTE(review): this block was damaged by a mechanical rename — results are
    bound to the throwaway ``a__`` while later lines read the intended names
    (``checkpoint``, ``unet_config``, ``current_layer``, ``channels_list`` …)
    and many call arguments became the undefined ``lowerCamelCase``. Comments
    describe the intended flow only; restore the real names from the upstream
    conversion script before running.
    """
    a__ : Optional[Any] = torch.load(lowerCamelCase , map_location="cpu" )
    a__ : Any = {}
    # Time embedding MLP.
    a__ : int = checkpoint["time_embed.0.weight"]
    a__ : Tuple = checkpoint["time_embed.0.bias"]
    a__ : Optional[int] = checkpoint["time_embed.2.weight"]
    a__ : List[Any] = checkpoint["time_embed.2.bias"]
    # Class-conditional models also carry a label embedding.
    if unet_config["num_class_embeds"] is not None:
        a__ : List[Any] = checkpoint["label_emb.weight"]
    # Input convolution.
    a__ : Tuple = checkpoint["input_blocks.0.0.weight"]
    a__ : Any = checkpoint["input_blocks.0.0.bias"]
    a__ : Dict = unet_config["down_block_types"]
    a__ : Dict = unet_config["layers_per_block"]
    a__ : Any = unet_config["attention_head_dim"]
    a__ : Tuple = unet_config["block_out_channels"]
    a__ : Dict = 1
    a__ : Tuple = channels_list[0]
    # Walk the encoder (down) blocks, mapping resnets/attentions/downsamplers.
    for i, layer_type in enumerate(lowerCamelCase ):
        a__ : int = channels_list[i]
        a__ : Tuple = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(lowerCamelCase ):
                a__ : Dict = F"""down_blocks.{i}.resnets.{j}"""
                a__ : Any = F"""input_blocks.{current_layer}.0"""
                a__ : Optional[Any] = True if j == 0 and downsample_block_has_skip else False
                a__ : List[Any] = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , has_skip=lowerCamelCase )
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(lowerCamelCase ):
                a__ : Tuple = F"""down_blocks.{i}.resnets.{j}"""
                a__ : str = F"""input_blocks.{current_layer}.0"""
                a__ : Optional[int] = True if j == 0 and downsample_block_has_skip else False
                a__ : List[Any] = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , has_skip=lowerCamelCase )
                a__ : Optional[int] = F"""down_blocks.{i}.attentions.{j}"""
                a__ : List[Any] = F"""input_blocks.{current_layer}.1"""
                a__ : Any = convert_attention(
                    lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
                current_layer += 1
        # All but the last stage end with a resnet downsampler.
        if i != len(lowerCamelCase ) - 1:
            a__ : Dict = F"""down_blocks.{i}.downsamplers.0"""
            a__ : int = F"""input_blocks.{current_layer}.0"""
            a__ : int = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
            current_layer += 1
        a__ : Optional[Any] = current_channels
    # hardcoded the mid-block for now
    a__ : List[str] = "mid_block.resnets.0"
    a__ : List[Any] = "middle_block.0"
    a__ : int = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
    a__ : List[str] = "mid_block.attentions.0"
    a__ : Tuple = "middle_block.1"
    a__ : Tuple = convert_attention(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
    a__ : Dict = "mid_block.resnets.1"
    a__ : Dict = "middle_block.2"
    a__ : List[Any] = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
    a__ : List[str] = 0
    a__ : List[Any] = unet_config["up_block_types"]
    # Walk the decoder (up) blocks symmetrically.
    for i, layer_type in enumerate(lowerCamelCase ):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1 ):
                a__ : Any = F"""up_blocks.{i}.resnets.{j}"""
                a__ : Tuple = F"""output_blocks.{current_layer}.0"""
                a__ : int = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , has_skip=lowerCamelCase )
                current_layer += 1
            if i != len(lowerCamelCase ) - 1:
                a__ : Optional[Any] = F"""up_blocks.{i}.upsamplers.0"""
                a__ : Optional[Any] = F"""output_blocks.{current_layer-1}.1"""
                a__ : str = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1 ):
                a__ : Dict = F"""up_blocks.{i}.resnets.{j}"""
                a__ : Optional[Any] = F"""output_blocks.{current_layer}.0"""
                a__ : Optional[Any] = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , has_skip=lowerCamelCase )
                a__ : int = F"""up_blocks.{i}.attentions.{j}"""
                a__ : Optional[int] = F"""output_blocks.{current_layer}.1"""
                a__ : Dict = convert_attention(
                    lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
                current_layer += 1
            if i != len(lowerCamelCase ) - 1:
                a__ : List[Any] = F"""up_blocks.{i}.upsamplers.0"""
                a__ : Union[str, Any] = F"""output_blocks.{current_layer-1}.2"""
                a__ : int = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
    # Output norm + convolution.
    a__ : int = checkpoint["out.0.weight"]
    a__ : List[str] = checkpoint["out.0.bias"]
    a__ : Optional[Any] = checkpoint["out.2.weight"]
    a__ : Tuple = checkpoint["out.2.bias"]
    return new_checkpoint
if __name__ == "__main__":
    # CLI entry point for converting a consistency-models checkpoint.
    # NOTE(review): mechanical-rename damage — results are assigned to
    # SCREAMING_SNAKE_CASE__ but read back under the intended names
    # (parser, args, ckpt_name, unet_config, scheduler_config, image_unet,
    # cm_scheduler, consistency_model …). Restore the real names before running.
    SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
    parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
    parser.add_argument(
        """--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
    )
    parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
    SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args()
    SCREAMING_SNAKE_CASE__ : Tuple = strabool(args.class_cond)
    SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.basename(args.unet_path)
    print(f'Checkpoint: {ckpt_name}')
    # Get U-Net config
    if "imagenet64" in ckpt_name:
        SCREAMING_SNAKE_CASE__ : int = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        SCREAMING_SNAKE_CASE__ : Optional[int] = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        SCREAMING_SNAKE_CASE__ : List[str] = TEST_UNET_CONFIG
    else:
        raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
    if not args.class_cond:
        SCREAMING_SNAKE_CASE__ : Optional[int] = None
    SCREAMING_SNAKE_CASE__ : Optional[Any] = con_pt_to_diffuser(args.unet_path, unet_config)
    SCREAMING_SNAKE_CASE__ : Dict = UNetaDModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        SCREAMING_SNAKE_CASE__ : int = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        SCREAMING_SNAKE_CASE__ : Tuple = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
    SCREAMING_SNAKE_CASE__ : Dict = CMStochasticIterativeScheduler(**scheduler_config)
    SCREAMING_SNAKE_CASE__ : Optional[int] = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
| 629 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger.
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__)
# Map of released MobileNetV2 checkpoints to their hosted config files.
# NOTE(review): this is conventionally named
# MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP; the throwaway name here looks
# like mechanical-rename damage — confirm before relying on it elsewhere.
SCREAMING_SNAKE_CASE__ : Any = {
    """google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
    """google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
    """google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
    """google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetVaConfig(PretrainedConfig):
    """Configuration for a MobileNetV2 model.

    Bug fix: the original declared every ``__init__`` parameter with the same
    name ``snake_case`` (a SyntaxError) and bound all values to throwaway
    targets; the parameter names are restored from the attribute reads in the
    body (``num_channels`` … ``semantic_loss_ignore_index``).
    """

    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        """Build the config; ``depth_multiplier`` scales every layer's channel count."""
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetVaOnnxConfig(OnnxConfig):
    """ONNX export configuration for MobileNetV2.

    Bug fix: the original named all three properties ``_snake_case`` so the
    later definitions shadowed the earlier ones; the canonical OnnxConfig
    property names (``inputs``/``outputs``/``atol_for_validation``) are
    restored.
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Single ``pixel_values`` input with a dynamic batch dimension."""
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """Export logits for classification, hidden/pooled states otherwise."""
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance when validating the exported graph against PyTorch."""
        return 1e-4
| 629 | 1 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    """Deprecated alias of ``YolosImageProcessor``; warns on construction.

    Bug fix: the original declared ``*snake_case, **snake_case`` (duplicate
    argument name, a SyntaxError) and passed an undefined name as the warning
    category; ``FutureWarning`` is the standard category for deprecations.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 629 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _A ( lowerCamelCase ):
a__ : List[str] = []
if isinstance(lowerCamelCase , lowerCamelCase ):
for v in tree.values():
shapes.extend(_fetch_dims(lowerCamelCase ) )
elif isinstance(lowerCamelCase , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(lowerCamelCase ) )
elif isinstance(lowerCamelCase , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("Not supported" )
return shapes
@torch.jit.ignore
def _A ( lowerCamelCase , lowerCamelCase ):
a__ : List[str] = []
for d in reversed(lowerCamelCase ):
idx.append(flat_idx % d )
a__ : Union[str, Any] = flat_idx // d
return tuple(reversed(lowerCamelCase ) )
@torch.jit.ignore
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , ):
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(lowerCamelCase ) -> None:
a__ : int = True
for i in range(len(lowerCamelCase ) ):
a__ : Optional[Any] = -1 * (i + 1)
l[reversed_idx] &= tally
a__ : Tuple = l[reversed_idx]
if start_edges is None:
a__ : Optional[int] = [s == 0 for s in start]
reduce_edge_list(lowerCamelCase )
if end_edges is None:
a__ : Union[str, Any] = [e == (d - 1) for e, d in zip(lowerCamelCase , lowerCamelCase )]
reduce_edge_list(lowerCamelCase )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(lowerCamelCase ) == 0:
return [()]
elif len(lowerCamelCase ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
a__ : List[Tuple[slice, ...]] = []
a__ : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(lowerCamelCase , lowerCamelCase ):
if s == e:
path_list.append(slice(lowerCamelCase , s + 1 ) )
else:
break
a__ : Tuple[slice, ...] = tuple(lowerCamelCase )
a__ : Optional[Any] = len(lowerCamelCase )
# start == end, and we're done
if divergence_idx == len(lowerCamelCase ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
a__ : Optional[Any] = start[divergence_idx]
return tuple(
path + (slice(lowerCamelCase , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
a__ : List[str] = end[divergence_idx]
return tuple(
path + (slice(lowerCamelCase , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
a__ : Optional[int] = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _A ( t , flat_start , flat_end , no_batch_dims ):
    """Slice the flat batch-index range [flat_start, flat_end) out of `t`'s
    first `no_batch_dims` dimensions and return it flattened along that axis.

    Fixes: all four parameters were named `lowerCamelCase` (SyntaxError) and
    the body referenced the intended names, which were undefined.
    NOTE(review): `_flat_idx_to_idx` / `_get_minimal_slice_set` are the sibling
    helpers defined just above (mangled to `_A` in this file) — the module-wide
    name collision cannot be repaired from inside this block.
    """
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start , batch_dims ) )
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1 , batch_dims ) )
    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx , end_idx , batch_dims , )
    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def _A ( layer , inputs , chunk_size , no_batch_dims , low_mem = False , _out = None , _add_into_out = False , ):
    """Apply `layer` to `inputs` in chunks of size `chunk_size` along the
    flattened leading `no_batch_dims` batch dimensions, reassembling the
    output into the original batch shape.

    Fixes: all seven parameters were named `lowerCamelCase` (SyntaxError) and
    the body assigned every intermediate to the same local `a__` while reading
    the intended names, which were undefined. Restored from the upstream
    OpenFold `chunk_layer` implementation.
    """
    if not (len(inputs ) > 0):
        raise ValueError("Must provide at least one input" )
    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs )]
    orig_batch_dims = tuple([max(s ) for s in zip(*initial_dims )] )

    def _prep_inputs(t ):
        # Flatten (or broadcast then flatten) the leading batch dims.
        if not low_mem:
            if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
            t = t.reshape(-1 , *t.shape[no_batch_dims:] )
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs , inputs )
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    num_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t ):
        # Size-1 leading dims were kept unexpanded in low_mem mode.
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(num_chunks ):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice , flat_start=i , flat_end=min(flat_batch_dim , i + chunk_size ) , no_batch_dims=len(orig_batch_dims ) , )
        chunks = tensor_tree_map(select_chunk , prepped_inputs )
        # Run the layer on the chunk
        output_chunk = layer(**chunks )
        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , output_chunk )
        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk , dict ):
            def assign(d1 , d2 ) -> None:
                for k, v in d1.items():
                    if isinstance(v , dict ):
                        assign(v , d2[k] )
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out , output_chunk )
        elif isinstance(output_chunk , tuple ):
            for xa, xb in zip(out , output_chunk ):
                if _add_into_out:
                    xa[i : i + chunk_size] += xb
                else:
                    xa[i : i + chunk_size] = xb
        elif isinstance(output_chunk , torch.Tensor ):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported" )
        i += chunk_size
    out = tensor_tree_map(lambda t : t.view(orig_batch_dims + t.shape[1:] ) , out )
    return out
class __lowerCAmelCase :
def __init__( self , snake_case = 512 , ) -> List[str]:
"""simple docstring"""
a__ : int = max_chunk_size
a__ : Optional[int] = None
a__ : Optional[tuple] = None
def _snake_case ( self , snake_case , snake_case , snake_case ) -> int:
"""simple docstring"""
logging.info("Tuning chunk size..." )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
a__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
a__ : List[str] = [c for c in candidates if c > min_chunk_size]
a__ : Optional[int] = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(snake_case ) -> bool:
try:
with torch.no_grad():
fn(*snake_case , chunk_size=snake_case )
return True
except RuntimeError:
return False
a__ : Union[str, Any] = 0
a__ : Dict = len(snake_case ) - 1
while i > min_viable_chunk_size_index:
a__ : Any = test_chunk_size(candidates[i] )
if not viable:
a__ : List[Any] = (min_viable_chunk_size_index + i) // 2
else:
a__ : Tuple = i
a__ : Any = (i + len(snake_case ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def _snake_case ( self , snake_case , snake_case ) -> bool:
"""simple docstring"""
a__ : str = True
for aa, aa in zip(snake_case , snake_case ):
assert type(snake_case ) == type(snake_case )
if isinstance(snake_case , (list, tuple) ):
consistent &= self._compare_arg_caches(snake_case , snake_case )
elif isinstance(snake_case , snake_case ):
a__ : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda snake_case : x[0] )]
a__ : List[Any] = [v for _, v in sorted(aa.items() , key=lambda snake_case : x[0] )]
consistent &= self._compare_arg_caches(snake_case , snake_case )
else:
consistent &= aa == aa
return consistent
def _snake_case ( self , snake_case , snake_case , snake_case , ) -> int:
"""simple docstring"""
a__ : List[Any] = True
a__ : tuple = tree_map(lambda snake_case : a.shape if isinstance(snake_case , torch.Tensor ) else a , snake_case , snake_case )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(snake_case )
a__ : Union[str, Any] = self._compare_arg_caches(self.cached_arg_data , snake_case )
else:
# Otherwise, we can reuse the precomputed value
a__ : Optional[int] = False
if not consistent:
a__ : List[str] = self._determine_favorable_chunk_size(
snake_case , snake_case , snake_case , )
a__ : List[str] = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 629 | 1 |
def _A ( lowerCamelCase ):
a__ : List[str] = 0
for ch in input_str:
a__ : Optional[Any] = ord(lowerCamelCase )
a__ : Optional[int] = pow(2 , lowerCamelCase )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
# Run this module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 629 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
class __lowerCAmelCase ( _UpperCamelCase ):
    """Configuration for UPerNet semantic-segmentation models.

    Fixes: duplicate `snake_case` parameters (SyntaxError); `isinstance(x, x)`
    instead of `isinstance(x, dict)`; hyper-parameters stored in throwaway
    locals instead of instance attributes; `to_dict` mutated locals instead of
    the output dict. The `model_type` attribute is restored because
    `_snake_case` below reads `self.__class__.model_type`; the mangled alias
    is kept for backward compatibility.
    """

    model_type = """upernet"""
    _UpperCamelCase : int = """upernet"""

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],  # NOTE(review): mutable default kept as-is; it is never mutated here
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ) -> None:
        """Store decode-head hyper-parameters and resolve the backbone config."""
        super().__init__(**kwargs )
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
        elif isinstance(backbone_config , dict ):
            # A plain dict is materialized into the matching config class.
            backbone_model_type = backbone_config.get("model_type" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def _snake_case ( self ) -> dict:
        """Serialize this config (and the nested backbone config) to a dict."""
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 629 | 1 |
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class __lowerCAmelCase :
_UpperCamelCase : str = field(
metadata={"""help""": """The output directory where the model will be written."""} ,)
_UpperCamelCase : str = field(
metadata={
"""help""": (
"""The encoder model checkpoint for weights initialization."""
"""Don't set if you want to train an encoder model from scratch."""
)
} ,)
_UpperCamelCase : str = field(
metadata={
"""help""": (
"""The decoder model checkpoint for weights initialization."""
"""Don't set if you want to train a decoder model from scratch."""
)
} ,)
_UpperCamelCase : Optional[str] = field(
default=_UpperCamelCase ,metadata={"""help""": """Pretrained encoder config name or path if not the same as encoder_model_name"""} )
_UpperCamelCase : Optional[str] = field(
default=_UpperCamelCase ,metadata={"""help""": """Pretrained decoder config name or path if not the same as decoder_model_name"""} )
def _A ( ):
    """Build a FlaxVisionEncoderDecoderModel from pretrained encoder/decoder
    checkpoints and save it together with its image processor and tokenizer.

    Fixes: every intermediate was assigned to the same throwaway local `a__`
    while the body read the intended names (`parser`, `model_args`,
    `decoder_config`, ...), all undefined. Names restored from those reads.
    NOTE(review): `ModelArguments` is the dataclass defined above (mangled to
    `__lowerCAmelCase` in this file) — confirm the intended reference.
    """
    parser = HfArgumentParser((ModelArguments,) )
    (model_args ,) = parser.parse_args_into_dataclasses()
    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name )
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name )
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=encoder_config , decoder_config=decoder_config , )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )

    model.save_pretrained(model_args.output_dir )
    image_processor.save_pretrained(model_args.output_dir )
    tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
    # Fixes: the guard called the undefined name `main`; this script's entry
    # point is the function above (mangled to `_A`).
    _A()
| 629 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
# Pillow >= 9.1 moved the resampling filters into the `Image.Resampling` enum
# and deprecated the module-level constants, so pick the namespace per version.
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
    SCREAMING_SNAKE_CASE__ : int = {
        """linear""": PIL.Image.Resampling.BILINEAR,
        """bilinear""": PIL.Image.Resampling.BILINEAR,
        """bicubic""": PIL.Image.Resampling.BICUBIC,
        """lanczos""": PIL.Image.Resampling.LANCZOS,
        """nearest""": PIL.Image.Resampling.NEAREST,
    }
else:
    SCREAMING_SNAKE_CASE__ : Dict = {
        """linear""": PIL.Image.LINEAR,
        """bilinear""": PIL.Image.BILINEAR,
        """bicubic""": PIL.Image.BICUBIC,
        """lanczos""": PIL.Image.LANCZOS,
        """nearest""": PIL.Image.NEAREST,
    }
def _A ( lowerCamelCase ):
    """Denormalize a batch of tensors from [-1, 1] to [0, 1] and convert to PIL.

    Fixes: the body read the undefined name `images` while assigning each
    intermediate to a throwaway local.
    NOTE(review): `numpy_to_pil` is presumably the sibling helper defined just
    below (also mangled to `_A` in this file) — confirm the intended target.
    """
    images = (lowerCamelCase / 2 + 0.5).clamp(0 , 1 )
    # channels-first torch tensor -> channels-last float numpy array on CPU
    images = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    images = numpy_to_pil(images )
    return images
def _A ( lowerCamelCase ):
    """Convert a numpy image (H, W, C) or batch (N, H, W, C) with values in
    [0, 1] to a list of PIL images.

    Fixes: the body read the undefined name `images` instead of the parameter.
    """
    images = lowerCamelCase
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8" )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze() , mode="L" ) for image in images]
    else:
        pil_images = [Image.fromarray(image ) for image in images]
    return pil_images
| 629 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger.
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__)
# Checkpoint name -> hosted config URL for ViT-MAE.
# NOTE(review): both constants are bound to the same mangled name, so the
# logger binding above is immediately shadowed — confirm intended names.
SCREAMING_SNAKE_CASE__ : Dict = {
    """facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class __lowerCAmelCase ( _UpperCamelCase ):
    """Configuration for ViT-MAE models (ViT encoder plus lightweight decoder).

    Fixes: every `__init__` parameter was named `snake_case` (duplicate
    argument names, a SyntaxError) and the body assigned the values to
    throwaway locals instead of instance attributes. Parameter names restored
    from the names the body reads.
    """

    _UpperCamelCase : Optional[int] = """vit_mae"""

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="""gelu""",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2_048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ) -> None:
        """Store encoder/decoder hyper-parameters; extra kwargs go to the base."""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 629 |
# Lint as: python3
import itertools
import os
import re
SCREAMING_SNAKE_CASE__ : Tuple = re.compile(R"""([A-Z]+)([A-Z][a-z])""")
SCREAMING_SNAKE_CASE__ : List[str] = re.compile(R"""([a-z\d])([A-Z])""")
SCREAMING_SNAKE_CASE__ : Tuple = re.compile(R"""(?<!_)_(?!_)""")
SCREAMING_SNAKE_CASE__ : Dict = re.compile(R"""(_{2,})""")
SCREAMING_SNAKE_CASE__ : List[Any] = R"""^\w+(\.\w+)*$"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = R"""<>:/\|?*"""
def _A ( lowerCamelCase ):
    """Convert a CamelCase name to snake_case.

    Fixes: the second substitution was applied to the original argument
    (discarding the first pass) and the return read the undefined name `name`.
    """
    result = _uppercase_uppercase_re.sub(r"\1_\2" , lowerCamelCase )
    result = _lowercase_uppercase_re.sub(r"\1_\2" , result )
    return result.lower()
def _A ( lowerCamelCase ):
    """Convert a snake_case name to CamelCase, preserving multi-underscore runs.

    Fixes: the comprehension re-split the whole argument instead of each word
    `n`, and `name` was undefined.
    """
    parts = _single_underscore_re.split(lowerCamelCase )
    parts = [_multiple_underscores_re.split(n ) for n in parts]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(parts ) if n != "" )
def _A ( lowerCamelCase ):
    """Return the snake_case filename prefix for a dataset name (no path parts).

    Fixes: the comparison read the undefined name `name` instead of the
    parameter. NOTE(review): `camelcase_to_snakecase` is the sibling helper
    above (mangled to `_A` in this file) — confirm the intended target.
    """
    if os.path.basename(lowerCamelCase ) != lowerCamelCase:
        raise ValueError(F"""Should be a dataset name, not a path: {lowerCamelCase}""" )
    return camelcase_to_snakecase(lowerCamelCase )
def _A ( name , split ):
    """Return the '<snake_name>-<split>' filename prefix after validating both.

    Fixes: both parameters were named `lowerCamelCase` (duplicate argument,
    a SyntaxError); names restored from the references in the body.
    """
    if os.path.basename(name ) != name:
        raise ValueError(F"""Should be a dataset name, not a path: {name}""" )
    if not re.match(_split_re , split ):
        raise ValueError(F"""Split name should match '{_split_re}'' but got '{split}'.""" )
    return F"""{filename_prefix_for_name(name )}-{split}"""
def _A ( dataset_name , split , data_dir , filetype_suffix=None ):
    """Return a glob pattern matching all shard files of a dataset split.

    Fixes: duplicate `lowerCamelCase` parameters (SyntaxError); names restored
    from the upstream `datasets.naming.filepattern_for_dataset_split`.
    """
    prefix = filename_prefix_for_split(dataset_name , split )
    if filetype_suffix:
        prefix += F""".{filetype_suffix}"""
    filepath = os.path.join(data_dir , prefix )
    return F"""{filepath}*"""
def _A ( path , dataset_name , split , filetype_suffix=None , shard_lengths=None ):
    """Return the concrete filenames for a dataset split, sharded or not.

    Fixes: duplicate `lowerCamelCase` parameters (SyntaxError) and undefined
    body names; restored from upstream `datasets.naming`.
    """
    prefix = filename_prefix_for_split(dataset_name , split )
    prefix = os.path.join(path , prefix )
    if shard_lengths:
        num_shards = len(shard_lengths )
        filenames = [F"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(num_shards )]
        if filetype_suffix:
            filenames = [filename + F""".{filetype_suffix}""" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += F""".{filetype_suffix}"""
        return [filename]
| 629 | 1 |
from math import sqrt
def _A ( lowerCamelCase ):
assert isinstance(lowerCamelCase , lowerCamelCase ) and (
number >= 0
), "'number' must been an int and positive"
a__ : str = True
# 0 and 1 are none primes.
if number <= 1:
a__ : Any = False
for divisor in range(2 , int(round(sqrt(lowerCamelCase ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
a__ : Tuple = False
break
# precondition
assert isinstance(lowerCamelCase , lowerCamelCase ), "'status' must been from type bool"
return status
def _A ( lowerCamelCase ):
assert isinstance(lowerCamelCase , lowerCamelCase ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
a__ : Optional[int] = list(range(2 , n + 1 ) )
a__ : List[Any] = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(lowerCamelCase ) ):
for j in range(i + 1 , len(lowerCamelCase ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
a__ : Any = 0
# filters actual prime numbers.
a__ : Any = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCamelCase , lowerCamelCase ), "'ans' must been from type list"
return ans
def _A ( lowerCamelCase ):
    """Return all primes in [2, N] by testing each candidate with is_prime.

    Fixes: `isinstance(x, x)`; the primality test was applied to the bound N
    (`is_prime(n)`) instead of the loop variable.
    """
    assert isinstance(lowerCamelCase , int ) and (lowerCamelCase > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2 , lowerCamelCase + 1 ):
        if is_prime(number ):
            ans.append(number )
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def _A ( lowerCamelCase ):
    """Return the prime factorization as a list (e.g. 12 -> [2, 2, 3]).

    Fixes: `isinstance(x, x)` and reads of the undefined names
    `number`/`quotient`/`factor` targets.
    """
    assert isinstance(lowerCamelCase , int ) and lowerCamelCase >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = lowerCamelCase
    if lowerCamelCase == 0 or lowerCamelCase == 1:
        ans.append(lowerCamelCase )
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(lowerCamelCase ):
        while quotient != 1:
            if is_prime(factor ) and (quotient % factor == 0):
                ans.append(factor )
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(lowerCamelCase )
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def _A ( lowerCamelCase ):
    """Return the greatest prime factor of the number.

    Fixes: `isinstance(x, x)` and intermediates discarded into one local.
    """
    assert isinstance(lowerCamelCase , int ) and (
        lowerCamelCase >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(lowerCamelCase )
    ans = max(prime_factors )
    # precondition
    assert isinstance(ans , int ), "'ans' must been from type int"
    return ans
def _A ( lowerCamelCase ):
    """Return the smallest prime factor of the number.

    Fixes: `isinstance(x, x)` and intermediates discarded into one local.
    """
    assert isinstance(lowerCamelCase , int ) and (
        lowerCamelCase >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(lowerCamelCase )
    ans = min(prime_factors )
    # precondition
    assert isinstance(ans , int ), "'ans' must been from type int"
    return ans
def _A ( lowerCamelCase ):
assert isinstance(lowerCamelCase , lowerCamelCase ), "'number' must been an int"
assert isinstance(number % 2 == 0 , lowerCamelCase ), "compare bust been from type bool"
return number % 2 == 0
def _A ( lowerCamelCase ):
assert isinstance(lowerCamelCase , lowerCamelCase ), "'number' must been an int"
assert isinstance(number % 2 != 0 , lowerCamelCase ), "compare bust been from type bool"
return number % 2 != 0
def _A ( lowerCamelCase ):
    """Goldbach: return two primes whose sum equals the given even number > 2.

    Fixes: reads of the undefined names `number`/`prime_numbers`/`len_pn`/
    `i`/`j`/`loop` whose assignments were discarded into one local.
    """
    assert (
        isinstance(lowerCamelCase , int ) and (lowerCamelCase > 2) and is_even(lowerCamelCase )
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(lowerCamelCase )
    len_pn = len(prime_numbers )
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == lowerCamelCase:
                loop = False
                ans.append(prime_numbers[i] )
                ans.append(prime_numbers[j] )
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans , list )
        and (len(ans ) == 2)
        and (ans[0] + ans[1] == lowerCamelCase)
        and is_prime(ans[0] )
        and is_prime(ans[1] )
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def _A ( lowerCamelCase , lowerCamelCase ):
assert (
isinstance(lowerCamelCase , lowerCamelCase )
and isinstance(lowerCamelCase , lowerCamelCase )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
a__ : Union[str, Any] = 0
while numbera != 0:
a__ : Tuple = numbera % numbera
a__ : str = numbera
a__ : Union[str, Any] = rest
# precondition
assert isinstance(lowerCamelCase , lowerCamelCase ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def _A ( numbera , numberb ):
    """Least common multiple (kgV) via prime factorizations.

    Fixes: duplicate `lowerCamelCase` parameters (SyntaxError); the two
    factor lists and counters were collapsed into single mangled locals.
    """
    assert (
        isinstance(numbera , int )
        and isinstance(numberb , int )
        and (numbera >= 1)
        and (numberb >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if numbera > 1 and numberb > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_a = prime_factorization(numbera )
        prime_fac_b = prime_factorization(numberb )
    elif numbera == 1 or numberb == 1:
        prime_fac_a = []
        prime_fac_b = []
        ans = max(numbera , numberb )
    count_a = 0
    count_b = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_a:
        if n not in done:
            if n in prime_fac_b:
                count_a = prime_fac_a.count(n )
                count_b = prime_fac_b.count(n )
                for _ in range(max(count_a , count_b ) ):
                    ans *= n
            else:
                count_a = prime_fac_a.count(n )
                for _ in range(count_a ):
                    ans *= n
            done.append(n )
    # iterates through primeFac2
    for n in prime_fac_b:
        if n not in done:
            count_b = prime_fac_b.count(n )
            for _ in range(count_b ):
                ans *= n
            done.append(n )
    # precondition
    assert isinstance(ans , int ) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def _A ( lowerCamelCase ):
    """Return the (n+1)-th prime number (n == 0 yields 2).

    Fixes: `isinstance(x, x)` and reads of the undefined names `n`/`index`/`ans`.
    """
    assert isinstance(lowerCamelCase , int ) and (lowerCamelCase >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < lowerCamelCase:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans ):
            ans += 1
    # precondition
    assert isinstance(ans , int ) and is_prime(
        ans ), "'ans' must been a prime number and from type int"
    return ans
def _A ( p_number_a , p_number_b ):
    """Return all primes strictly between two given primes.

    Fixes: duplicate `lowerCamelCase` parameters (SyntaxError) collapsing both
    bounds into one name; restored per upstream `get_primes_between`.
    """
    assert (
        is_prime(p_number_a ) and is_prime(p_number_b ) and (p_number_a < p_number_b)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_a + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number ):
        number += 1
    while number < p_number_b:
        ans.append(number )
        number += 1
        # fetch the next prime number.
        while not is_prime(number ):
            number += 1
    # precondition
    assert (
        isinstance(ans , list )
        and ans[0] != p_number_a
        and ans[len(ans ) - 1] != p_number_b
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def _A ( lowerCamelCase ):
assert isinstance(lowerCamelCase , lowerCamelCase ) and (n >= 1), "'n' must been int and >= 1"
a__ : Any = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowerCamelCase )
# precondition
assert ans[0] == 1 and ans[len(lowerCamelCase ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def _A ( lowerCamelCase ):
    """Return True iff the number equals the sum of its proper divisors.

    Fixes: `isinstance(x, x)` and reads of the undefined names
    `number`/`divisors`.
    """
    assert isinstance(lowerCamelCase , int ) and (
        lowerCamelCase > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(lowerCamelCase )
    # precondition
    assert (
        isinstance(divisors , list )
        and (divisors[0] == 1)
        and (divisors[len(divisors ) - 1] == lowerCamelCase)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1] ) == lowerCamelCase
def _A ( numerator , denominator ):
    """Reduce numerator/denominator by their gcd; return the reduced tuple.

    Fixes: duplicate `lowerCamelCase` parameters (SyntaxError); names restored
    from the references in the body.
    """
    assert (
        isinstance(numerator , int )
        and isinstance(denominator , int )
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator ) , abs(denominator ) )
    # precondition
    assert (
        isinstance(gcd_of_fraction , int )
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def _A ( lowerCamelCase ):
assert isinstance(lowerCamelCase , lowerCamelCase ) and (n >= 0), "'n' must been a int and >= 0"
a__ : Dict = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def _A ( lowerCamelCase ):
assert isinstance(lowerCamelCase , lowerCamelCase ) and (n >= 0), "'n' must been an int and >= 0"
a__ : Tuple = 0
a__ : int = 1
a__ : List[str] = 1 # this will be return
for _ in range(n - 1 ):
a__ : List[Any] = ans
ans += fiba
a__ : Any = tmp
return ans
| 629 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: submodule name -> public symbols it provides.
SCREAMING_SNAKE_CASE__ : Any = {"""configuration_sew""": ["""SEW_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SEWConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fixes: the modeling list previously rebound the whole structure dict
    # instead of adding a key, and the final _LazyModule call referenced the
    # undefined name `_import_structure` and discarded its result instead of
    # installing it in sys.modules.
    SCREAMING_SNAKE_CASE__["""modeling_sew"""] = [
        """SEW_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """SEWForCTC""",
        """SEWForSequenceClassification""",
        """SEWModel""",
        """SEWPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy; heavy deps resolve on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], SCREAMING_SNAKE_CASE__, module_spec=__spec__)
| 629 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import structure: submodule name -> public symbols it provides.
SCREAMING_SNAKE_CASE__ : Optional[int] = {
    """configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fixes: the modeling list previously rebound the whole structure dict
    # instead of adding a key, and the final _LazyModule call referenced the
    # undefined name `_import_structure` and discarded its result instead of
    # installing it in sys.modules.
    SCREAMING_SNAKE_CASE__["""modeling_falcon"""] = [
        """FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """FalconForCausalLM""",
        """FalconModel""",
        """FalconPreTrainedModel""",
        """FalconForSequenceClassification""",
        """FalconForTokenClassification""",
        """FalconForQuestionAnswering""",
    ]

if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy; heavy deps resolve on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], SCREAMING_SNAKE_CASE__, module_spec=__spec__)
| 629 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy-import structure: maps each submodule to the public names it defines.
# (Was assigned to a throwaway name while `_LazyModule` received an undefined
# `_import_structure` — NameError at import time.)
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy imports are deferred.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 629 | 1 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _A ( lowerCamelCase ):
a__ : List[str] = []
if isinstance(lowerCamelCase , lowerCamelCase ):
for v in tree.values():
shapes.extend(_fetch_dims(lowerCamelCase ) )
elif isinstance(lowerCamelCase , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(lowerCamelCase ) )
elif isinstance(lowerCamelCase , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("Not supported" )
return shapes
@torch.jit.ignore
def _A ( lowerCamelCase , lowerCamelCase ):
a__ : List[str] = []
for d in reversed(lowerCamelCase ):
idx.append(flat_idx % d )
a__ : Union[str, Any] = flat_idx // d
return tuple(reversed(lowerCamelCase ) )
@torch.jit.ignore
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , ):
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(lowerCamelCase ) -> None:
a__ : int = True
for i in range(len(lowerCamelCase ) ):
a__ : Optional[Any] = -1 * (i + 1)
l[reversed_idx] &= tally
a__ : Tuple = l[reversed_idx]
if start_edges is None:
a__ : Optional[int] = [s == 0 for s in start]
reduce_edge_list(lowerCamelCase )
if end_edges is None:
a__ : Union[str, Any] = [e == (d - 1) for e, d in zip(lowerCamelCase , lowerCamelCase )]
reduce_edge_list(lowerCamelCase )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(lowerCamelCase ) == 0:
return [()]
elif len(lowerCamelCase ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
a__ : List[Tuple[slice, ...]] = []
a__ : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(lowerCamelCase , lowerCamelCase ):
if s == e:
path_list.append(slice(lowerCamelCase , s + 1 ) )
else:
break
a__ : Tuple[slice, ...] = tuple(lowerCamelCase )
a__ : Optional[Any] = len(lowerCamelCase )
# start == end, and we're done
if divergence_idx == len(lowerCamelCase ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
a__ : Optional[Any] = start[divergence_idx]
return tuple(
path + (slice(lowerCamelCase , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
a__ : List[str] = end[divergence_idx]
return tuple(
path + (slice(lowerCamelCase , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
a__ : Optional[int] = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _chunk_slice(t, flat_start, flat_end, no_batch_dims):
    """Select rows [flat_start, flat_end) of t's *flattened* leading batch dims
    without actually flattening the tensor (avoids a copy for non-contiguous t).
    """
    # NOTE: the four parameters were all named `lowerCamelCase` (SyntaxError);
    # names restored from the body's references and the keyword call site in
    # the chunking loop below (flat_start=/flat_end=/no_batch_dims=).
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def _A(layer, inputs, chunk_size, no_batch_dims, low_mem=False, _out=None, _add_into_out=False):
    """Run `layer` over `inputs` in chunks along the flattened leading
    `no_batch_dims` batch dimensions and stitch the outputs back together.

    Args:
        layer: callable invoked as layer(**chunk_of_inputs).
        inputs: (possibly nested) dict of tensors whose leading dims broadcast.
        chunk_size: number of flattened batch rows per chunk.
        no_batch_dims: how many leading dims count as batch dims.
        low_mem: avoid materializing expanded/flattened inputs up front.
        _out: optional pre-allocated output tree to write into.
        _add_into_out: accumulate into `_out` instead of overwriting.
    """
    # NOTE: the parameters were all named `lowerCamelCase` (SyntaxError) and
    # several lambdas had their parameter renamed away from `t`; restored from
    # the body's own references (low_mem/chunk_size/_out/_add_into_out/...).
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    # Broadcast target: the per-dimension max over all inputs' batch dims.
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            # Low-mem path defers flattening to _chunk_slice.
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    # Ceiling division: one extra chunk for any remainder.
    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t) -> torch.Tensor:
        # Size-1 leading dims were broadcast, not expanded; keep them whole.
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1, d2) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
class __lowerCAmelCase :
def __init__( self , snake_case = 512 , ) -> List[str]:
"""simple docstring"""
a__ : int = max_chunk_size
a__ : Optional[int] = None
a__ : Optional[tuple] = None
def _snake_case ( self , snake_case , snake_case , snake_case ) -> int:
"""simple docstring"""
logging.info("Tuning chunk size..." )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
a__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
a__ : List[str] = [c for c in candidates if c > min_chunk_size]
a__ : Optional[int] = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(snake_case ) -> bool:
try:
with torch.no_grad():
fn(*snake_case , chunk_size=snake_case )
return True
except RuntimeError:
return False
a__ : Union[str, Any] = 0
a__ : Dict = len(snake_case ) - 1
while i > min_viable_chunk_size_index:
a__ : Any = test_chunk_size(candidates[i] )
if not viable:
a__ : List[Any] = (min_viable_chunk_size_index + i) // 2
else:
a__ : Tuple = i
a__ : Any = (i + len(snake_case ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def _snake_case ( self , snake_case , snake_case ) -> bool:
"""simple docstring"""
a__ : str = True
for aa, aa in zip(snake_case , snake_case ):
assert type(snake_case ) == type(snake_case )
if isinstance(snake_case , (list, tuple) ):
consistent &= self._compare_arg_caches(snake_case , snake_case )
elif isinstance(snake_case , snake_case ):
a__ : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda snake_case : x[0] )]
a__ : List[Any] = [v for _, v in sorted(aa.items() , key=lambda snake_case : x[0] )]
consistent &= self._compare_arg_caches(snake_case , snake_case )
else:
consistent &= aa == aa
return consistent
def _snake_case ( self , snake_case , snake_case , snake_case , ) -> int:
"""simple docstring"""
a__ : List[Any] = True
a__ : tuple = tree_map(lambda snake_case : a.shape if isinstance(snake_case , torch.Tensor ) else a , snake_case , snake_case )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(snake_case )
a__ : Union[str, Any] = self._compare_arg_caches(self.cached_arg_data , snake_case )
else:
# Otherwise, we can reuse the precomputed value
a__ : Optional[int] = False
if not consistent:
a__ : List[str] = self._determine_favorable_chunk_size(
snake_case , snake_case , snake_case , )
a__ : List[str] = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 629 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
# Reference passage fed to the text-question-answering tool in the tests below;
# the expected answers are substrings of this text.
SCREAMING_SNAKE_CASE__ : Dict = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class __lowerCAmelCase(unittest.TestCase, ToolTesterMixin):
    """Tests the `text-question-answering` tool, locally and via the remote endpoint."""

    def setUp(self):
        # Must be named `setUp` so unittest runs it, and the tools must be bound
        # to `self` (they were assigned to throwaway locals) so the test methods
        # and ToolTesterMixin can reach them.
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(SCREAMING_SNAKE_CASE__, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(SCREAMING_SNAKE_CASE__, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=SCREAMING_SNAKE_CASE__, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=SCREAMING_SNAKE_CASE__, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 629 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: maps each submodule to the public names it defines.
# (Was assigned to a throwaway name while `_LazyModule` received an undefined
# `_import_structure` — NameError at import time.)
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]

if TYPE_CHECKING:
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone

else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy imports are deferred.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 629 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __lowerCAmelCase(TestCasePlus):
    """Verifies that cached models keep working when internet access is mocked
    away and/or TRANSFORMERS_OFFLINE=1 is set. Each scenario runs the snippets
    `load` + `run` (+ `mock`) in a child Python process and inspects its output.

    NOTE(review): the env-var writes were dead assignments (`a__ = "1"`) and
    `check`/`capture_output` were passed an undefined name; restored to
    `env["TRANSFORMERS_OFFLINE"] = "1"` and `check=False, capture_output=True`
    (check must be False because the return code is asserted manually).
    """

    @require_torch
    def test_offline_mode(self):
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed even without the offline flag: the cache is warm.
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = "\nfrom transformers import pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # Task inference needs the Hub, so this must fail with a clear message.
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_mode_custom_model(self):
        load = "\nfrom transformers import AutoModel\n "
        run = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
| 629 | 1 |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    """Static SageMaker launch config for the conversion tests below.

    NOTE: every attribute was named `_UpperCamelCase` (later assignments
    silently overwrote earlier ones) and the class name did not match the
    `MockLaunchConfig.*` references in the test case; names restored.
    """

    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    # Well-formed script args: every flag carries an explicit value.
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    # Malformed script args: bare store-true flags mixed with valued flags.
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
class __lowerCAmelCase(unittest.TestCase):
    def test_args_convert(self):
        """_convert_nargs_to_dict must infer str/bool/int/float from raw CLI
        strings and reject mixed store-true/valued flags."""
        # NOTE: the result was bound to a throwaway name while the asserts read
        # `converted_args` (NameError), and the isinstance/raises targets were
        # the undefined name `snake_case`; restored to concrete types.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
| 629 |
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy-import structure: maps each submodule to the public names it defines.
# (Was assigned to a throwaway name while `_LazyModule` received an undefined
# `_import_structure` — NameError at import time.)
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy imports are deferred.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 629 | 1 |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
SCREAMING_SNAKE_CASE__ : List[Any] = logging.getLogger(__name__)
class __lowerCAmelCase(_UpperCamelCase):
    """BertEncoder variant that can run one transformer layer at a time, so the
    PABEE model can evaluate an early-exit classifier after every layer."""

    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        # Named `adaptive_forward` to match the `self.encoder.adaptive_forward(...)`
        # call sites in the PABEE model below (it was `_snake_case`, with
        # duplicate parameter names — a SyntaxError).
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]

        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class __lowerCAmelCase(_UpperCamelCase):
    """BERT with Patience-based Early Exit (PABEE): at inference time, stop
    running layers once `patience` consecutive per-layer classifiers agree.

    NOTE(review): all instance attributes were assigned to throwaway locals
    (`a__`) while the methods read `self.patience` etc.; restored.
    """

    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)
        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        # Agreement tolerance for regression tasks (num_labels == 1).
        self.regression_threshold = threshold

    def set_patience(self, patience):
        # 0 disables early exit (all layers are always used at inference).
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        """Returns a list of per-layer logits (training) or a single-element
        list with the early-exit logits (inference)."""
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            # Training: run every layer and collect every classifier's logits.
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            # Early exit: stop once `patience` consecutive layers agree.
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class __lowerCAmelCase(_UpperCamelCase):
    """Sequence classification/regression head over the PABEE BERT backbone:
    one linear classifier per transformer layer, loss weighted by layer depth.

    NOTE(review): `self.num_labels`/`self.bert`/`self.dropout`/`self.classifiers`
    were assigned to throwaway locals while `forward` reads them; restored.
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # One early-exit classifier per hidden layer.
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    # Deeper layers contribute proportionally more to the loss.
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
| 629 |
from PIL import Image


def change_brightness(img , level ):
    """Return a copy of *img* with every channel value shifted by *level*.

    Args:
        img: a ``PIL.Image.Image`` (anything exposing ``point``).
        level: brightness offset in ``[-255.0, 255.0]``; negative darkens.

    Returns:
        The brightened image produced by ``img.point``.

    Raises:
        ValueError: if ``level`` lies outside the allowed range.

    Fixes the mangled original: both parameters shared one name (a
    SyntaxError) and the inner function read unbound names ``c``/``level``.
    The function is renamed to match its call site (``change_brightness``).
    """
    # Validate before building the mapping function so bad input fails fast.
    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)" )

    def brightness(c ) -> float:
        # Equivalent to ``c + level``: shift each value around the 128 mid-point.
        return 128 + level + (c - 128)

    return img.point(brightness )
if __name__ == "__main__":
    # Load image; the context manager closes the underlying file handle.
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100 (the function validates the range).
        # Fixes the mangled original, which called the undefined name
        # `change_brightness` on an undefined `brigt_img` variable.
        brigt_img = change_brightness(img, 100)
    brigt_img.save("image_data/lena_brightness.png", format="png")
| 629 | 1 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x ):
    """Return *x* unchanged if it is already iterable, else duplicate it into a 2-tuple.

    Mirrors ViT's ``to_2tuple`` helper: scalar image/patch sizes become
    ``(size, size)`` while tuples and lists pass through untouched. Fixes the
    mangled original, which returned the unbound name ``x``; renamed to match
    the ``to_atuple(...)`` call sites in this file.
    """
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
@require_tf
class __lowerCAmelCase :
    # Mixin of shared checks for TFVisionTextDualEncoder model combinations.
    # Concrete subclasses supply the vision/text submodels and their inputs.
    # NOTE(review): repeated `snake_case` parameter names (a SyntaxError) and
    # `a__` locals look machine-mangled; several reads below reference names
    # that are never bound — restore the original identifiers before running.

    def _snake_case ( self , snake_case , snake_case ) -> List[str]:
        """Hook: return the (vision_model, text_model) pair under test."""
        pass

    def _snake_case ( self ) -> List[Any]:
        """Hook: return the config-and-inputs for the combination under test."""
        pass

    def _snake_case ( self ) -> Dict:
        """Hook: return a pretrained dual-encoder model plus matching inputs."""
        pass

    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case=None , **snake_case ) -> Any:
        """Build the dual encoder from a joint config and check embedding shapes."""
        a__ : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(snake_case , snake_case )
        a__ : Tuple = TFVisionTextDualEncoderModel(snake_case )
        a__ : List[Any] = model(input_ids=snake_case , pixel_values=snake_case , attention_mask=snake_case )
        # Both towers must project into the shared `projection_dim` space.
        self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )

    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case=None , **snake_case ) -> Dict:
        """Build the dual encoder from instantiated submodels and check shapes."""
        a__ , a__ : Optional[int] = self.get_vision_text_model(snake_case , snake_case )
        a__ : Optional[Any] = TFVisionTextDualEncoderModel(vision_model=snake_case , text_model=snake_case )
        a__ : Tuple = model(input_ids=snake_case , pixel_values=snake_case , attention_mask=snake_case )
        self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )

    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case=None , **snake_case ) -> Optional[int]:
        """Build the dual encoder via `from_vision_text_pretrained` and check shapes."""
        a__ , a__ : Optional[int] = self.get_vision_text_model(snake_case , snake_case )
        a__ : Tuple = {"vision_model": vision_model, "text_model": text_model}
        a__ : Optional[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case )
        a__ : str = model(input_ids=snake_case , pixel_values=snake_case , attention_mask=snake_case )
        self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )

    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case=None , **snake_case ) -> Any:
        """Save the model, reload it, and check outputs are numerically unchanged."""
        a__ , a__ : Dict = self.get_vision_text_model(snake_case , snake_case )
        a__ : Dict = TFVisionTextDualEncoderModel(vision_model=snake_case , text_model=snake_case )
        a__ : Dict = model(input_ids=snake_case , pixel_values=snake_case , attention_mask=snake_case )
        a__ : Optional[int] = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(snake_case )
            a__ : int = TFVisionTextDualEncoderModel.from_pretrained(snake_case )
            a__ : int = model(input_ids=snake_case , pixel_values=snake_case , attention_mask=snake_case )
            a__ : Optional[Any] = after_output[0].numpy()
            # Reloaded weights must reproduce the original outputs to ~1e-5.
            a__ : Any = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(snake_case , 1E-5 )

    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case=None , **snake_case ) -> str:
        """Check attention tensors from both towers have the expected shapes."""
        a__ , a__ : Tuple = self.get_vision_text_model(snake_case , snake_case )
        a__ : int = TFVisionTextDualEncoderModel(vision_model=snake_case , text_model=snake_case )
        a__ : List[Any] = model(
            input_ids=snake_case , pixel_values=snake_case , attention_mask=snake_case , output_attentions=snake_case )
        a__ : int = output.vision_model_output.attentions
        self.assertEqual(len(snake_case ) , vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        a__ : Optional[int] = to_atuple(vision_model.config.image_size )
        a__ : List[str] = to_atuple(vision_model.config.patch_size )
        a__ : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        a__ : Optional[Any] = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        a__ : Any = output.text_model_output.attentions
        self.assertEqual(len(snake_case ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )

    def _snake_case ( self , snake_case , snake_case , snake_case ) -> Dict:
        """Assert two arrays agree element-wise within the given tolerance."""
        a__ : Union[str, Any] = np.abs((a - b) ).max()
        self.assertLessEqual(snake_case , snake_case , F"""Difference between torch and flax is {diff} (>= {tol}).""" )

    def _snake_case ( self ) -> Union[str, Any]:
        """Run the joint-config construction check with this combination's inputs."""
        a__ : str = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**snake_case )

    def _snake_case ( self ) -> List[Any]:
        """Run the from-configs construction check."""
        a__ : Union[str, Any] = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**snake_case )

    def _snake_case ( self ) -> Dict:
        """Run the from-pretrained construction check."""
        a__ : Union[str, Any] = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**snake_case )

    def _snake_case ( self ) -> Any:
        """Run the save/reload round-trip check."""
        a__ : List[str] = self.prepare_config_and_inputs()
        self.check_save_load(**snake_case )

    def _snake_case ( self ) -> List[str]:
        """Run the attention-shape check."""
        a__ : Optional[int] = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**snake_case )

    @slow
    def _snake_case ( self ) -> Any:
        """Slow: a pretrained model's save/reload must be numerically stable."""
        a__ , a__ : Tuple = self.get_pretrained_model_and_inputs()
        a__ : List[str] = model_a(**snake_case )
        a__ : Optional[Any] = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(snake_case )
            a__ : str = TFVisionTextDualEncoderModel.from_pretrained(snake_case )
            a__ : Tuple = model_a(**snake_case )
            a__ : Tuple = after_outputs[0].numpy()
            a__ : str = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(snake_case , 1E-5 )
@require_tf
class __lowerCAmelCase ( _UpperCamelCase ,unittest.TestCase ):
    # ViT (vision) + BERT (text) instantiation of the dual-encoder test mixin.
    # NOTE(review): `a__` locals and the all-`a__` unpack below look
    # machine-mangled; the returned dict reads names that are never bound.

    def _snake_case ( self ) -> int:
        """Return a tiny pretrained ViT+BERT dual encoder and random inputs."""
        a__ : int = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-random-bert" )
        a__ : int = 13
        a__ : Any = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        a__ : Any = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        a__ : int = random_attention_mask([batch_size, 4] )
        a__ : Optional[int] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def _snake_case ( self , snake_case , snake_case ) -> Union[str, Any]:
        """Instantiate the TF ViT and BERT submodels from their configs."""
        a__ : List[str] = TFViTModel(snake_case , name="vision_model" )
        a__ : List[Any] = TFBertModel(snake_case , name="text_model" )
        return vision_model, text_model

    def _snake_case ( self ) -> Dict:
        """Assemble config-and-inputs from the ViT and BERT model testers."""
        a__ : Optional[Any] = TFViTModelTester(self )
        a__ : Any = TFBertModelTester(self )
        a__ : str = vit_model_tester.prepare_config_and_inputs()
        a__ : str = bert_model_tester.prepare_config_and_inputs()
        a__ , a__ , a__ : Dict = vision_config_and_inputs
        (
            (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) ,
        ) : List[Any] = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class __lowerCAmelCase ( _UpperCamelCase ,unittest.TestCase ):
    # DeiT (vision) + RoBERTa (text) instantiation of the dual-encoder mixin.
    # NOTE(review): repeated `snake_case` parameters (a SyntaxError) and `a__`
    # locals look machine-mangled; several names read below are never bound.

    def _snake_case ( self ) -> Tuple:
        """Return a tiny pretrained DeiT+RoBERTa dual encoder and random inputs."""
        a__ : List[str] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf" , "hf-internal-testing/tiny-random-roberta" )
        a__ : str = 13
        a__ : str = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        a__ : Any = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        a__ : List[Any] = random_attention_mask([batch_size, 4] )
        a__ : Tuple = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case=None , **snake_case ) -> Tuple:
        """Override: DeiT prepends [CLS] and distillation tokens, so seq_len = patches + 2."""
        a__ , a__ : Optional[int] = self.get_vision_text_model(snake_case , snake_case )
        a__ : str = TFVisionTextDualEncoderModel(vision_model=snake_case , text_model=snake_case )
        a__ : int = model(
            input_ids=snake_case , pixel_values=snake_case , attention_mask=snake_case , output_attentions=snake_case )
        a__ : Optional[int] = output.vision_model_output.attentions
        self.assertEqual(len(snake_case ) , vision_config.num_hidden_layers )
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        a__ : Union[str, Any] = to_atuple(vision_model.config.image_size )
        a__ : Tuple = to_atuple(vision_model.config.patch_size )
        a__ : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        a__ : int = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        a__ : int = output.text_model_output.attentions
        self.assertEqual(len(snake_case ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )

    def _snake_case ( self , snake_case , snake_case ) -> str:
        """Instantiate the TF DeiT and RoBERTa submodels from their configs."""
        a__ : Union[str, Any] = TFDeiTModel(snake_case , name="vision_model" )
        a__ : int = TFRobertaModel(snake_case , name="text_model" )
        return vision_model, text_model

    def _snake_case ( self ) -> int:
        """Assemble config-and-inputs from the DeiT and RoBERTa model testers."""
        a__ : Dict = TFDeiTModelTester(self )
        a__ : Optional[int] = TFRobertaModelTester(self )
        a__ : Dict = vit_model_tester.prepare_config_and_inputs()
        a__ : Dict = bert_model_tester.prepare_config_and_inputs()
        a__ , a__ , a__ : Any = vision_config_and_inputs
        (
            (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) ,
        ) : Optional[int] = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class __lowerCAmelCase ( _UpperCamelCase ,unittest.TestCase ):
    # CLIP-vision + BERT (text) instantiation of the dual-encoder test mixin.
    # NOTE(review): `a__` locals and the all-`a__` unpack below look
    # machine-mangled; the returned dict reads names that are never bound.

    def _snake_case ( self ) -> str:
        """Return a tiny pretrained CLIP+BERT dual encoder and random inputs."""
        a__ : List[str] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf" , "hf-internal-testing/tiny-random-bert" )
        a__ : List[str] = 13
        a__ : Any = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        a__ : Optional[int] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        a__ : Dict = random_attention_mask([batch_size, 4] )
        a__ : Optional[int] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def _snake_case ( self , snake_case , snake_case ) -> Optional[Any]:
        """Instantiate the TF CLIP vision tower and BERT text model from their configs."""
        a__ : Dict = TFCLIPVisionModel(snake_case , name="vision_model" )
        a__ : Union[str, Any] = TFBertModel(snake_case , name="text_model" )
        return vision_model, text_model

    def _snake_case ( self ) -> Tuple:
        """Assemble config-and-inputs from the CLIP-vision and BERT model testers."""
        a__ : Optional[int] = TFCLIPVisionModelTester(self )
        a__ : List[Any] = TFBertModelTester(self )
        a__ : Tuple = clip_model_tester.prepare_config_and_inputs()
        a__ : List[Any] = bert_model_tester.prepare_config_and_inputs()
        a__ , a__ : str = vision_config_and_inputs
        (
            (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) ,
        ) : List[Any] = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
    # End-to-end integration check against the public clip-italian checkpoint.

    @slow
    def _snake_case ( self ) -> Optional[Any]:
        """Integration: clip-italian produces the recorded image/text logits."""
        a__ : Union[str, Any] = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian" , logit_scale_init_value=1.0 , from_pt=snake_case )
        a__ : Dict = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
        a__ : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        a__ : Optional[Any] = processor(
            text=["una foto di un gatto", "una foto di un cane"] , images=snake_case , padding=snake_case , return_tensors="np" )
        a__ : List[Any] = model(**snake_case )
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        # Reference values recorded from the original (PyTorch) checkpoint.
        a__ : Optional[Any] = np.array([[1.2_284_727, 0.3_104_122]] )
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , snake_case , atol=1E-3 ) )
| 629 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
# Official OpenAI download URLs for every public Whisper checkpoint. The path
# segment just before the filename is the file's expected SHA-256 hex digest,
# which the download helper uses to verify the bytes.
SCREAMING_SNAKE_CASE__ : List[str] = {
    """tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
    """tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
    """base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
    """base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
    """small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
    """small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
    """medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
    """medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
    """large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
    """large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def remove_ignore_keys_(state_dict ):
    """Drop OpenAI bookkeeping entries ("layers", "blocks") from *state_dict* in place.

    Fixes the mangled original, which read the unbound names ``ignore_keys``/
    ``state_dict`` and popped the wrong value; renamed to match the
    ``remove_ignore_keys_(...)`` call site in the conversion function.
    """
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        # Default of None so absent keys are ignored instead of raising KeyError.
        state_dict.pop(k , None )
# Substring-level renames from OpenAI Whisper checkpoint keys to the
# Transformers WhisperModel naming scheme (applied by `rename_keys`).
SCREAMING_SNAKE_CASE__ : List[str] = {
    """blocks""": """layers""",
    """mlp.0""": """fc1""",
    """mlp.2""": """fc2""",
    """mlp_ln""": """final_layer_norm""",
    """.attn.query""": """.self_attn.q_proj""",
    """.attn.key""": """.self_attn.k_proj""",
    """.attn.value""": """.self_attn.v_proj""",
    """.attn_ln""": """.self_attn_layer_norm""",
    """.attn.out""": """.self_attn.out_proj""",
    """.cross_attn.query""": """.encoder_attn.q_proj""",
    """.cross_attn.key""": """.encoder_attn.k_proj""",
    """.cross_attn.value""": """.encoder_attn.v_proj""",
    """.cross_attn_ln""": """.encoder_attn_layer_norm""",
    """.cross_attn.out""": """.encoder_attn.out_proj""",
    """decoder.ln.""": """decoder.layer_norm.""",
    """encoder.ln.""": """encoder.layer_norm.""",
    """token_embedding""": """embed_tokens""",
    """encoder.positional_embedding""": """encoder.embed_positions.weight""",
    """decoder.positional_embedding""": """decoder.embed_positions.weight""",
    """ln_post""": """layer_norm""",
}
def rename_keys(s_dict ):
    """Rewrite Whisper checkpoint keys to Transformers names via WHISPER_MAPPING.

    Operates in place on *s_dict* and also returns it. Fixes the mangled
    original, which read the unbound names ``s_dict``/``keys``/``new_key``,
    called ``replace`` with the wrong arguments, and dropped the renamed
    entry instead of re-inserting it; renamed to match the ``rename_keys(...)``
    call site in the conversion function.
    """
    keys = list(s_dict.keys() )
    for key in keys:
        new_key = key
        # Apply every substring rename that matches this key.
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k , v )
        print(F"""{key} -> {new_key}""" )
        # Re-insert the tensor under its new name.
        s_dict[new_key] = s_dict.pop(key )
    return s_dict
def make_linear_from_emb(emb ):
    """Create a bias-free ``nn.Linear`` that shares its weight tensor with *emb*.

    Used to tie the decoder output projection to the token embedding. Fixes
    the mangled original, which read the unbound name ``emb`` and passed the
    embedding module itself as the Linear dimensions; renamed to match the
    ``make_linear_from_emb(...)`` call site in the conversion function.

    Args:
        emb: an ``nn.Embedding`` of shape (vocab_size, emb_size).

    Returns:
        An ``nn.Linear(vocab_size, emb_size, bias=False)`` whose ``weight.data``
        is the embedding's weight tensor (shared, not copied).
    """
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    # Share (don't copy) the embedding weights so the projection stays tied.
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url , root=None ):
    """Download *url* into *root*, verifying the SHA-256 digest embedded in the URL.

    Args:
        url: one of the ``_MODELS`` URLs; the path segment before the filename
            is the file's expected SHA-256 hex digest.
        root: cache directory; defaults to ``~/.cache/whisper`` (the default
            makes the single-argument call site in this file work).

    Returns:
        The raw checkpoint bytes.

    Raises:
        RuntimeError: if the target path exists but is not a regular file, or
            the downloaded file fails checksum verification.

    Fixes the mangled original: duplicate parameter names (a SyntaxError),
    unbound locals, the nonexistent ``hashlib.shaaaa``, file handles that were
    never closed, and a doubled "not not" in the error message.
    """
    import urllib.request  # ensure the submodule is loaded; `import urllib` alone does not

    if root is None:
        root = os.path.join(os.path.expanduser("~" ) , ".cache" , "whisper" )
    os.makedirs(root , exist_ok=True )
    filename = os.path.basename(url )
    expected_sha256 = url.split("/" )[-2]
    download_target = os.path.join(root , filename )
    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(F"""{download_target} exists and is not a regular file""" )
    if os.path.isfile(download_target ):
        # Cache hit: reuse the file if its checksum still matches.
        with open(download_target , "rb" ) as f:
            model_bytes = f.read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_sha256:
            return model_bytes
        warnings.warn(F"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
    with urllib.request.urlopen(url ) as source, open(download_target , "wb" ) as output:
        with tqdm(
            total=int(source.info().get("Content-Length" ) ) , ncols=80 , unit="iB" , unit_scale=True , unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192 )
                if not buffer:
                    break
                output.write(buffer )
                loop.update(len(buffer ) )
    with open(download_target , "rb" ) as f:
        model_bytes = f.read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model." )
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path , pytorch_dump_folder_path ):
    """Convert an OpenAI Whisper checkpoint to a Transformers WhisperForConditionalGeneration.

    Args:
        checkpoint_path: a local ``.pt`` checkpoint path, or one of the
            ``_MODELS`` names (e.g. ``"tiny"``) to download first.
        pytorch_dump_folder_path: output directory passed to ``save_pretrained``.

    Raises:
        ValueError: if weights other than the positional embeddings are
            missing after loading the converted state dict.

    Fixes the mangled original: duplicate parameter names (a SyntaxError),
    unbound locals, and ``decoder_attention_heads`` read from ``n_text_state``
    instead of ``n_text_head``; renamed to match the call in ``__main__``.
    """
    if ".pt" not in checkpoint_path:
        import io  # local import: only needed to load from downloaded bytes

        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path] ) ) , map_location="cpu" )
    else:
        original_checkpoint = torch.load(checkpoint_path , map_location="cpu" )
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    # Keep the decoder embedding weights around: they double as proj_out.
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    tie_embeds = True
    # Both FFNs use the same inner width; read it off the first decoder layer.
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"] ,
        encoder_ffn_dim=ffn_dim ,
        decoder_ffn_dim=ffn_dim ,
        num_mel_bins=dimensions["n_mels"] ,
        d_model=dimensions["n_audio_state"] ,
        max_target_positions=dimensions["n_text_ctx"] ,
        encoder_layers=dimensions["n_audio_layer"] ,
        encoder_attention_heads=dimensions["n_audio_head"] ,
        decoder_layers=dimensions["n_text_layer"] ,
        # BUGFIX: head count comes from n_text_head, not n_text_state.
        decoder_attention_heads=dimensions["n_text_head"] ,
        max_source_positions=dimensions["n_audio_ctx"] ,
    )
    model = WhisperForConditionalGeneration(config )
    missing , unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            F""" but all the following weights are missing {missing}""" )
    if tie_embeds:
        # Tie proj_out to the decoder token embedding.
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # Command-line entry point: parse the checkpoint source and output
    # directory, then run the conversion. Fixes the mangled original, where
    # `parser` and `args` were never bound (the objects were assigned to a
    # throwaway name) and the checkpoint help string had a typo.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 629 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__)

# Canonical config URLs for the public InstructBLIP checkpoints.
SCREAMING_SNAKE_CASE__ : Any = {
    """Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class InstructBlipVisionConfig ( PretrainedConfig ):
    """Configuration for the InstructBLIP vision encoder (a ViT variant).

    Restores the identifiers the rest of this file expects: the mangled
    version repeated the parameter name (a SyntaxError), never bound
    ``config_dict``/``kwargs`` in ``from_pretrained``, and hid the class under
    a name that the ``InstructBlipVisionConfig(**...)`` call site cannot see.
    """

    model_type = "instructblip_vision_model"

    def __init__(
        self ,
        hidden_size=1_408 ,
        intermediate_size=6_144 ,
        num_hidden_layers=39 ,
        num_attention_heads=16 ,
        image_size=224 ,
        patch_size=14 ,
        hidden_act="gelu" ,
        layer_norm_eps=1E-6 ,
        attention_dropout=0.0 ,
        initializer_range=1E-10 ,
        qkv_bias=True ,
        **kwargs ,
    ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        """Load this config, unwrapping `vision_config` when given a full InstructBlip config."""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type" ) == "instructblip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class InstructBlipQFormerConfig ( PretrainedConfig ):
    """Configuration for the InstructBLIP Q-Former (a BERT-like querying transformer).

    Restores the identifiers the rest of this file expects: the mangled
    version repeated the parameter name (a SyntaxError), never bound
    ``config_dict``/``kwargs`` in ``from_pretrained``, and hid the class under
    a name the ``InstructBlipQFormerConfig(**...)`` call site cannot see.
    """

    model_type = "instructblip_qformer"

    def __init__(
        self ,
        vocab_size=30_522 ,
        hidden_size=768 ,
        num_hidden_layers=12 ,
        num_attention_heads=12 ,
        intermediate_size=3_072 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        max_position_embeddings=512 ,
        initializer_range=0.02 ,
        layer_norm_eps=1E-12 ,
        pad_token_id=0 ,
        position_embedding_type="absolute" ,
        cross_attention_frequency=2 ,
        encoder_hidden_size=1_408 ,
        **kwargs ,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # A cross-attention block toward the vision encoder every N layers.
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        """Load this config, unwrapping `qformer_config` when given a full InstructBlip config."""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type" ) == "instructblip":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class InstructBlipConfig ( PretrainedConfig ):
    """Composite configuration tying together vision, Q-Former, and language-model configs.

    Restores the identifiers the mangled version lost: its ``__init__``
    repeated one parameter name four times (a SyntaxError) and bound the
    sub-configs to throwaway locals instead of attributes.
    """

    model_type = "instructblip"
    is_composition = True

    def __init__( self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=32 , **kwargs ) -> None:
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
        self.vision_config = InstructBlipVisionConfig(**vision_config )
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config )
        # The language model defaults to OPT when no model_type is given.
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        # The Q-Former cross-attends into the vision encoder's hidden states.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs( cls , vision_config , qformer_config , text_config , **kwargs , ) -> "InstructBlipConfig":
        """Build a composite config from the three component config objects."""
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )

    def to_dict( self ) -> dict:
        """Serialize, expanding the nested component configs to plain dicts."""
        output = copy.deepcopy(self.__dict__ )
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 629 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)

# Canonical config URLs for pretrained Informer checkpoints.
SCREAMING_SNAKE_CASE__ : str = {
    """huggingface/informer-tourism-monthly""": (
        """https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig ( PretrainedConfig ):
    """Configuration for the Informer probabilistic time-series transformer.

    Restores usable identifiers: the mangled version repeated the parameter
    name ``snake_case`` 33 times (a SyntaxError) and assigned every value to a
    throwaway local instead of a ``self`` attribute. The body at the original
    L16447 already read ``self._number_of_features``, fixing the property name.
    """

    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self ,
        prediction_length = None ,
        context_length = None ,
        distribution_output = "student_t" ,
        loss = "nll" ,
        input_size = 1 ,
        lags_sequence = None ,
        scaling = "mean" ,
        num_dynamic_real_features = 0 ,
        num_static_categorical_features = 0 ,
        num_static_real_features = 0 ,
        num_time_features = 0 ,
        cardinality = None ,
        embedding_dimension = None ,
        d_model = 64 ,
        encoder_ffn_dim = 32 ,
        decoder_ffn_dim = 32 ,
        encoder_attention_heads = 2 ,
        decoder_attention_heads = 2 ,
        encoder_layers = 2 ,
        decoder_layers = 2 ,
        is_encoder_decoder = True ,
        activation_function = "gelu" ,
        dropout = 0.05 ,
        encoder_layerdrop = 0.1 ,
        decoder_layerdrop = 0.1 ,
        attention_dropout = 0.1 ,
        activation_dropout = 0.1 ,
        num_parallel_samples = 100 ,
        init_std = 0.02 ,
        use_cache=True ,
        attention_type = "prob" ,
        sampling_factor = 5 ,
        distil = True ,
        **kwargs ,
    ) -> None:
        # time series specific configuration
        self.prediction_length = prediction_length
        # Default the encoder context window to the prediction horizon.
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality ) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`" )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension ) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
            self.embedding_dimension = embedding_dimension
        else:
            # Standard heuristic: embedding size grows with the cardinality, capped at 50.
            self.embedding_dimension = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Informer-specific: ProbSparse attention and encoder distillation.
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )

    @property
    def _number_of_features( self ) -> int:
        """Number of extra per-step input features beyond the lagged values."""
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 629 | 1 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix=""):
    """Return a unique file path (with the given suffix) inside a fresh temp dir.

    Fixes: the helper was named `_A` while its only caller uses
    `get_new_path(suffix=...)`; the body referenced an undefined `suffix`,
    discarded the temp directory, and called the non-existent `uuid.uuida`.
    """
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)


@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    """Round-trip checks for AgentAudio (tensor <-> wav file).

    Methods renamed from duplicate `_snake_case` (which shadowed each other and
    were never discovered by unittest) to unique `test_*` names.
    """

    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1E-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1E-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16_000)

        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1E-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    """Round-trip checks for AgentImage (tensor / path / PIL image inputs).

    Renamed from a duplicate class name and duplicate `_snake_case` methods so
    all three tests are actually discovered and run; undefined local references
    restored (`tensor`, `path`, `image`).
    """

    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1E-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        # Built from a path: to_string() must point at the same file.
        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        # Built from a PIL image: serialization goes to a new temp file.
        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
    """AgentText should behave like the plain string it wraps.

    Renamed from a duplicate class name (three test classes shared one name,
    so only the last was visible to unittest) and the method made discoverable;
    undefined `snake_case` references restored to the local `string`.
    """

    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
| 629 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
# Tiny checkpoints used as teachers. Both constants were previously bound to
# the same name (the second assignment clobbered the first), and the class
# below referenced undefined names.
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    """Tests for create_student_by_copying_alternating_layers.

    Methods renamed from duplicate `_snake_case` (which shadowed each other and
    were invisible to unittest discovery) to unique `test_*` names.
    """

    @cached_property
    def teacher_config(self):
        # Referenced by test_same_decoder_small_encoder via self.teacher_config.
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        # d=None keeps the full decoder; just check the call succeeds.
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        # At least one of e/d must be given.
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
| 629 | 1 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class __lowerCAmelCase ( ABC ):
    """Abstract base class for CLI subcommands.

    Fixes: the base class referenced an undefined name (the file imports
    ``ABC`` for exactly this purpose); both abstract methods shared one name,
    so the second definition silently replaced the first; the return
    annotations referenced undefined ``Any``/``List``.
    """

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command and its arguments on the given parser."""
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        """Execute the command."""
        raise NotImplementedError()
| 629 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
# Checkpoint ids for the four Stable Diffusion v1.x releases being compared.
# These were previously all bound to a single name, so only v1-4 survived.
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"


class __lowerCAmelCase ( DiffusionPipeline ):
    """Run the same prompt through SD v1.1–v1.4 and return all four images.

    Fixes: ``super()._init_()`` -> ``super().__init__()``; the undefined base
    class restored to ``DiffusionPipeline``; ``register_modules`` no longer
    repeats one keyword four times (a SyntaxError); the four generation
    methods and their parameters are given distinct, real names.
    """

    def __init__(
        self,
        vae,
        text_encoder,
        tokenizer,
        unet,
        scheduler,
        safety_checker,
        feature_extractor,
        requires_safety_checker=True,
    ):
        super().__init__()
        # The first three pipelines are the fixed published checkpoints.
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        # The fourth is assembled from the modules handed to this constructor.
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(
            pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4
        )

    @property
    def layers(self) -> Dict[str, Any]:
        """Registered sub-modules, keyed by their config name."""
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        """Compute attention in slices to save memory."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """Go back to computing attention in one step."""
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        """Generate with the v1.1 checkpoint."""
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        """Generate with the v1.2 checkpoint."""
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        """Generate with the v1.3 checkpoint."""
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        """Generate with the v1.4 checkpoint."""
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        """Run all four checkpoints on the same prompt.

        Returns a StableDiffusionPipelineOutput holding the first image from
        each checkpoint, in release order (v1.1 .. v1.4).

        Raises:
            ValueError: if height or width is not divisible by 8.
        """
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""")

        # Identical generation settings for every checkpoint.
        common = dict(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

        res1 = self.text2img_sd1_1(**common)
        res2 = self.text2img_sd1_2(**common)
        res3 = self.text2img_sd1_3(**common)
        res4 = self.text2img_sd1_4(**common)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
| 629 | 1 |
import re
from filelock import FileLock
try:
    import nltk

    # The flag was previously assigned to a throwaway name while the code
    # below reads NLTK_AVAILABLE, which was never defined.
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    # Serialize the punkt download across concurrent workers.
    with FileLock(""".lock"""):
        nltk.download("""punkt""", quiet=True)


def _A(lowerCamelCase):
    """Split a Pegasus-style summary into one sentence per line.

    Fixes: the re.sub result was discarded (strings are immutable), so the
    "<n>" markers were never removed.
    """
    lowerCamelCase = re.sub("<n>", "", lowerCamelCase)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(lowerCamelCase))
| 629 |
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 9.80665
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase = g ):
if fluid_density <= 0:
raise ValueError("Impossible fluid density" )
if volume < 0:
raise ValueError("Impossible Object volume" )
if gravity <= 0:
raise ValueError("Impossible Gravity" )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 629 | 1 |
def triangle_number_generator():
    """Yield triangular numbers 1, 3, 6, 10, ... for n up to 10**6 - 1."""
    for n in range(1, 100_0000):
        yield n * (n + 1) // 2


def count_divisors(n):
    """Count divisors of ``n`` via its prime factorization.

    If n = p1^a1 * p2^a2 * ..., the divisor count is (a1+1)(a2+1)...
    """
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        # One remaining prime factor with multiplicity 1.
        divisors_count *= 2
    return divisors_count


def solution():
    """Project Euler 12: first triangular number with over 500 divisors.

    The three functions here previously all shared the name ``_A`` (each def
    shadowing the previous one) while this body called the real names, which
    did not exist — restored the names the callers expect.
    """
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
| 629 |
from __future__ import annotations
from random import random
class Node:
    """Treap node: a value, a random heap priority, and two children.

    Renamed to ``Node`` — ``insert`` constructs ``Node(value)`` and the child
    annotations reference ``Node``, neither of which existed under the old name.
    """

    def __init__(self, value: int | None = None) -> None:
        self.value = value
        # A random priority keeps the treap balanced in expectation.
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return F"""'{self.value}: {self.prior:.5}'"""
        else:
            return pformat(
                {F"""{self.value}: {self.prior:.5}""": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into (nodes <= value, nodes > value).

    Restored: the recursive results were assigned to a throwaway name and the
    child re-attachment lines were missing, so ``split`` returned undefined
    names. All treap functions below were also defined under one shared name
    while calling each other by these real names.
    """
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            # root (and its right subtree) belong to the "greater" half.
            left, right = split(root.left, value)
            root.left = right
            return left, root
        else:
            # root (and its left subtree) belong to the "less-or-equal" half.
            left, right = split(root.right, value)
            root.right = left
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps where every value in ``left`` <= every value in ``right``."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        # left wins the heap property: its right spine absorbs ``right``.
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    """Insert ``value`` into the treap and return the new root."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    """Remove all nodes equal to ``value`` and return the new root."""
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    """Print the treap's values in sorted order, comma-separated."""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    """Apply a whitespace-separated command string: ``+N`` inserts, ``-N`` erases."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    """Interactive REPL over the treap until the user enters 'q'."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. ")

    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()

    print("good by!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 629 | 1 |
# Lazy import shim for the Speech2Text model family: framework-specific
# submodules are only imported on first attribute access (or when a static
# type checker is running).
#
# Fixes: every registration previously rebound one throwaway module name (so
# all entries but the last were lost), the `_import_structure` handed to
# `_LazyModule` was never defined, the lazy module was bound to a dead name
# instead of replacing `sys.modules[__name__]`, and the TYPE_CHECKING imports
# used identifiers that did not match the registered export names.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)

# Always-importable modules: configuration and processor.
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

# Each optional backend registers its submodule only when installed.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that resolves attributes on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 629 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()


class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast (tiny-model) checks for StableUnCLIPPipeline.

    Restored: the class previously inherited from an undefined name repeated
    three times; the mixin configuration attributes all shared one name (each
    rebinding the previous); ``get_dummy_inputs`` declared two parameters with
    the same name (a SyntaxError); and locals were collapsed to one name while
    the component/input dicts referenced the real names.
    """

    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        """Build the tiny prior + denoiser component set used by the mixins."""
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1E-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1_000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1_000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1E-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1_000,
            )
        )

        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            # conditioned on (noise-level embedding + noised image embedding)
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00_085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic minimal pipeline inputs for the given device/seed."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        # Exact-difference checks only where results are deterministic (CPU).
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    """GPU integration tests against the published stable-unclip checkpoint.

    Restored: duplicate `_snake_case` method names (unittest never ran them),
    undefined locals (`pipe`, `output`, ...), the invalid `torch.floataa`
    dtype, and the missing `tearDown` override.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy")

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 629 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
    """Integration check for the TF CamemBERT base model.

    Restored: the model was called with an undefined name instead of the
    constructed tensor, `tf.intaa`/`tf.floataa` are not real TF dtypes, and
    the method name was not discoverable by unittest.
    """

    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]

        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1E-4))
| 629 |
# Lazy import shim for DistilBERT: framework-specific submodules are imported
# only on first attribute access (or when a static type checker is running).
#
# Fixes: every registration previously rebound a single throwaway module name
# (losing all entries but the last), the `_import_structure` handed to
# `_LazyModule` was never defined, and the lazy module object was bound to a
# dead name instead of replacing `sys.modules[__name__]`.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Always-importable modules: configuration and the slow tokenizer.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

# Each optional backend registers its submodule only when installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that resolves attributes on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 629 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger (obfuscated variable naming preserved from the generated file).
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__)
# Checkpoint name -> hosted config.json URL for the known MobileNetV2 weights.
SCREAMING_SNAKE_CASE__ : Any = {
    """google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
    """google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
    """google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
    """google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class __lowerCAmelCase ( _UpperCamelCase ):
    """Configuration holding the hyperparameters of a MobileNetV2 model.

    Fixes over the generated code: the original ``__init__`` declared every
    parameter with the same name (a SyntaxError) and discarded each value
    into a throwaway local instead of storing it on ``self``.
    """

    # Key used by the config machinery to identify the architecture.
    model_type = """mobilenet_v2"""

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **snake_case,
    ) -> None:
        """Store the model hyperparameters; extra kwargs go to the base config.

        Raises:
            ValueError: if ``depth_multiplier`` is not strictly positive.
        """
        super().__init__(**snake_case )
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero." )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class __lowerCAmelCase ( _UpperCamelCase ):
    """ONNX export configuration for MobileNetV2.

    Fix over the generated code: all three properties were named
    ``_snake_case``, so only the last one survived; the canonical
    ``OnnxConfig`` property names are restored.
    """

    # Minimum torch version required to export this architecture.
    torch_onnx_minimum_version = version.parse("""1.11""" )

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Input names and their dynamic axes for export."""
        return OrderedDict([("pixel_values", {0: "batch"})] )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """Output names and dynamic axes; depends on the export task."""
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})] )
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance when validating the exported model."""
        return 1E-4
| 629 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations):
    """Estimate pi via Monte Carlo: sample points in [-1, 1]^2 and count hits
    inside the unit circle. Prints the estimate and error; returns None.

    The generated file defined four functions all named ``_A`` (each
    shadowing the last); the intended distinct names are restored.
    """

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x, y) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2) )
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle.
    proportion = mean(
        int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
        for _ in range(iterations) )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(F"""The estimated value of pi is {pi_estimate}""" )
    print(F"""The numpy value of pi is {pi}""" )
    print(F"""The total error is {abs(pi - pi_estimate )}""" )
def area_under_curve_estimator(iterations, function_to_integrate, min_value = 0.0, max_value = 1.0):
    """Monte Carlo estimate of the integral of ``function_to_integrate`` over
    [min_value, max_value]: the mean of f at uniform samples times the
    interval length. (The generated code declared every parameter with the
    same name, which is a SyntaxError; real names are restored.)
    """
    return mean(
        function_to_integrate(uniform(min_value, max_value) ) for _ in range(iterations) ) * (max_value - min_value)
def area_under_line_estimator_check(iterations, min_value = 0.0, max_value = 1.0):
    """Sanity-check the estimator against y = x, whose exact integral over
    [min_value, max_value] is (max^2 - min^2) / 2. Prints a comparison.

    (Duplicate-parameter SyntaxError fixed; the call target
    ``area_under_curve_estimator`` now actually exists under that name.)
    """

    def identity_function(x) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************" )
    print(F"""Estimating area under y=x where x varies from {min_value} to {max_value}""" )
    print(F"""Estimated value is {estimated_value}""" )
    print(F"""Expected value is {expected_value}""" )
    print(F"""Total error is {abs(estimated_value - expected_value )}""" )
    print("******************" )
def pi_estimator_using_area_under_curve(iterations):
    """Estimate pi as the area under the quarter-circle y = sqrt(4 - x^2)
    on [0, 2] (exact value: pi). Prints the estimate and error.
    """

    def function_to_integrate(x) -> float:
        return sqrt(4.0 - x * x )

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0 , 2.0 )
    print("******************" )
    print("Estimating pi using area_under_curve_estimator" )
    print(F"""Estimated value is {estimated_value}""" )
    print(F"""Expected value is {pi}""" )
    print(F"""Total error is {abs(estimated_value - pi )}""" )
    print("******************" )
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 629 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger (obfuscated variable naming preserved from the generated file).
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__)
# Checkpoint name -> hosted config.json URL for the known XLM-RoBERTa weights.
SCREAMING_SNAKE_CASE__ : Optional[int] = {
    """xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""",
    """xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""",
    """xlm-roberta-large-finetuned-conll02-dutch""": (
        """https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"""
    ),
    """xlm-roberta-large-finetuned-conll02-spanish""": (
        """https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"""
    ),
    """xlm-roberta-large-finetuned-conll03-english""": (
        """https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"""
    ),
    """xlm-roberta-large-finetuned-conll03-german""": (
        """https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"""
    ),
}
class __lowerCAmelCase ( _UpperCamelCase ):
    """Configuration holding the hyperparameters of an XLM-RoBERTa model.

    Fixes over the generated code: the original ``__init__`` declared every
    parameter with the same name (a SyntaxError) and discarded each value
    into a throwaway local instead of storing it on ``self``.
    """

    # Key used by the config machinery to identify the architecture.
    model_type = """xlm-roberta"""

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **snake_case,
    ) -> None:
        """Store the model hyperparameters; special-token ids and extra
        kwargs are forwarded to the base config."""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **snake_case )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __lowerCAmelCase ( _UpperCamelCase ):
    """ONNX export configuration for XLM-RoBERTa.

    Fix over the generated code: the axis dict was assigned to a throwaway
    local while the return expression read the undefined name
    ``dynamic_axis`` (NameError at runtime); the canonical ``OnnxConfig``
    property name ``inputs`` is restored.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Input names and their dynamic axes for export."""
        # Multiple-choice inputs carry an extra `choice` axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 629 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    """Map an original EfficientFormer state-dict key to its HF-model name.

    The generated code declared both parameters with the same name (a
    SyntaxError) and referenced locals (``layer``, ``match``, ``trimmed_name``,
    ``new_name``) that were never bound; the intended names are restored.

    Args:
        old_name: key from the original checkpoint's state dict.
        num_meta4D_last_stage: number of meta-4D blocks in the last stage;
            blocks at or past this index belong to the meta-3D layers.
    Returns:
        The renamed key for the converted model.
    """
    new_name = old_name
    # Stem: patch_embed.{0,1,3,4}.* -> conv/batchnorm pairs.
    if "patch_embed" in old_name:
        _, layer, param = old_name.split("." )
        if layer == "0":
            new_name = old_name.replace("0" , "convolution1" )
        elif layer == "1":
            new_name = old_name.replace("1" , "batchnorm_before" )
        elif layer == "3":
            new_name = old_name.replace("3" , "convolution2" )
        else:
            new_name = old_name.replace("4" , "batchnorm_after" )
    # Backbone blocks: network.<stage>.<block>.* keys.
    if "network" in old_name and re.search(r"\d\.\d" , old_name ):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num , old_name ) ):
            match = re.search(r"\d\.\d\d." , old_name ).group()
        else:
            match = re.search(r"\d\.\d." , old_name ).group()
        if int(match[0] ) < 6:
            trimmed_name = old_name.replace(match , "" )
            trimmed_name = trimmed_name.replace("network" , match[0] + ".meta4D_layers.blocks." + match[2:-1] )
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match , "" )
            # Last stage mixes meta-4D and meta-3D blocks.
            if int(match[2] ) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network" , "meta4D_layers.blocks." + match[2] )
            else:
                layer_index = str(int(match[2] ) - num_meta4D_last_stage )
                trimmed_name = trimmed_name.replace("network" , "meta3D_layers.blocks." + layer_index )
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1" , "layernorm1" )
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2" , "layernorm2" )
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1" , "linear_in" )
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2" , "linear_out" )
            new_name = "last_stage." + trimmed_name
    elif "network" in old_name and re.search(r".\d." , old_name ):
        new_name = old_name.replace("network" , "intermediate_stages" )
        if "fc" in new_name:
            new_name = new_name.replace("fc" , "convolution" )
        elif ("norm1" in new_name) and ("layernorm1" not in new_name):
            new_name = new_name.replace("norm1" , "batchnorm_before" )
        elif ("norm2" in new_name) and ("layernorm2" not in new_name):
            new_name = new_name.replace("norm2" , "batchnorm_after" )
    if "proj" in new_name:
        new_name = new_name.replace("proj" , "projection" )
    # Heads and top-level norms get their final, model-scoped prefixes.
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head" , "distillation_classifier" )
    elif "head" in new_name:
        new_name = new_name.replace("head" , "classifier" )
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm" , "layernorm" )
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name
    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    """Rewrite every key of ``checkpoint`` (a state dict) in place via
    ``rename_key`` and return it.

    The generated code declared both parameters with the same name (a
    SyntaxError) and dropped each popped value into a throwaway local; the
    intended re-keying is restored.
    """
    # Iterate over a copy of the keys since the dict is mutated in the loop.
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key )
        checkpoint[rename_key(key , num_meta4D_last_stage )] = val
    return checkpoint
def prepare_img():
    """Download the standard COCO validation image used to sanity-check
    vision-model conversions and return it as a PIL image.

    (Restored from a generated ``_A`` definition that was shadowed by later
    functions of the same name.)
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    """Convert an original EfficientFormer checkpoint to the HF format,
    verify its preprocessing and logits, save it, and optionally push it.

    Fixes over the generated code: all four parameters shared one name (a
    SyntaxError), every intermediate was dropped into a throwaway local, and
    ``config.num_metaad_blocks`` was a mangling of ``num_meta3d_blocks``.

    Args:
        checkpoint_path: path to the original ``.pth`` checkpoint.
        efficientformer_config_file: JSON config for the target model.
        pytorch_dump_path: output directory for the converted model.
        push_to_hub: whether to upload model and processor to the hub.
    Raises:
        ValueError: if the checkpoint name is not an l1/l3/l7 variant.
    """
    orig_state_dict = torch.load(checkpoint_path , map_location="cpu" )["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file )
    model = EfficientFormerForImageClassificationWithTeacher(config )
    model_name = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
    # Blocks past this index in the last stage are meta-3D blocks.
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict , num_meta4D_last_stage )
    model.load_state_dict(new_state_dict )
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size} , crop_size={"height": crop_size, "width": crop_size} , resample=pillow_resamplings["bicubic"] , )
    pixel_values = processor(images=image , return_tensors="pt" ).pixel_values

    # original processing pipeline, used to cross-check the HF processor
    image_transforms = Compose(
        [
            Resize(image_size , interpolation=pillow_resamplings["bicubic"] ),
            CenterCrop(crop_size ),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN , IMAGENET_DEFAULT_STD ),
        ] )
    original_pixel_values = image_transforms(image ).unsqueeze(0 )
    assert torch.allclose(original_pixel_values , pixel_values )

    outputs = model(pixel_values )
    logits = outputs.logits

    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
        assert torch.allclose(logits[0, :10] , expected_logits , atol=1E-3 )
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
        assert torch.allclose(logits[0, :10] , expected_logits , atol=1E-3 )
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )

    # Save Checkpoints
    Path(pytorch_dump_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_path )
    print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
    processor.save_pretrained(pytorch_dump_path )
    print(F"""Processor successfuly saved at {pytorch_dump_path}""" )

    if push_to_hub:
        print("Pushing model to the hub..." )
        model.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add model" , use_temp_dir=True , )
        processor.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add image processor" , use_temp_dir=True , )
# Script entry point: parse CLI flags and run the conversion.
# Fix over the generated code: the parser and parsed args were bound to
# throwaway SCREAMING_SNAKE_CASE__ variables while the following lines read
# the undefined names `parser` and `args` (NameError at runtime), and the
# call target `convert_efficientformer_checkpoint` is now defined above.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--pytorch_model_path""",
        default=None,
        type=str,
        required=True,
        help="""Path to EfficientFormer pytorch checkpoint.""",
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The json file for EfficientFormer model config.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    parser.add_argument(
        """--no-push_to_hub""",
        dest="""push_to_hub""",
        action="""store_false""",
        help="""Do not push model and image processor to the hub""",
    )
    # Pushing is on by default; `--no-push_to_hub` disables it.
    parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
| 629 | 1 |
def _A ( lowerCamelCase , lowerCamelCase ):
a__ : str = [1]
for i in range(2 , lowerCamelCase ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
a__ : Optional[int] = []
a__ : Union[str, Any] = list(range(lowerCamelCase ) )
# Find permutation
while factorials:
a__ : int = factorials.pop()
a__ , a__ : str = divmod(lowerCamelCase , lowerCamelCase )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
| 629 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
# File names the tokenizer saves/loads (slow vocab + fast tokenizer JSON).
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
# Checkpoint name -> hosted vocabulary / tokenizer file URLs.
SCREAMING_SNAKE_CASE__ : List[Any] = {
    """vocab_file""": {
        """unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
    },
    """tokenizer_file""": {
        """unc-nlp/lxmert-base-uncased""": (
            """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
        ),
    },
}
# Maximum input length (in tokens) per checkpoint.
SCREAMING_SNAKE_CASE__ : Any = {
    """unc-nlp/lxmert-base-uncased""": 5_1_2,
}
# Per-checkpoint tokenizer init overrides.
SCREAMING_SNAKE_CASE__ : Optional[int] = {
    """unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class __lowerCAmelCase ( _UpperCamelCase ):
    """Fast (``tokenizers``-backed) LXMERT tokenizer.

    Fixes over the generated code: all five class attributes and all three
    methods were bound to one shared name (so only the last survived), and
    every method declared its parameters with the same name (a SyntaxError).
    The canonical fast-tokenizer attribute and method names are restored.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **snake_case,
    ) -> None:
        """Build the fast tokenizer and re-sync the backend normalizer if its
        saved state disagrees with the requested casing/accent options."""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **snake_case , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            # Rebuild the normalizer with the requested options.
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair=None) -> List[int]:
        """Wrap one or two sequences with [CLS] ... [SEP] (... [SEP])."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a_pair:
            output += token_ids_a_pair + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_pair=None) -> List[int]:
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a_pair + sep ) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Save the backend model's vocabulary files and return their paths."""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 629 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
# Module logger (obfuscated variable naming preserved from the generated file).
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__)
# File names the tokenizer saves/loads (slow vocab + fast tokenizer JSON).
SCREAMING_SNAKE_CASE__ : int = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
# Checkpoint name -> hosted vocabulary / tokenizer file URLs.
SCREAMING_SNAKE_CASE__ : str = {
    """vocab_file""": {
        """distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
        """distilbert-base-uncased-distilled-squad""": (
            """https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
        ),
        """distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
        """distilbert-base-cased-distilled-squad""": (
            """https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
        ),
        """distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
        """distilbert-base-multilingual-cased""": (
            """https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
        """distilbert-base-uncased-distilled-squad""": (
            """https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
        ),
        """distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
        """distilbert-base-cased-distilled-squad""": (
            """https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
        ),
        """distilbert-base-german-cased""": (
            """https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
        ),
        """distilbert-base-multilingual-cased""": (
            """https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
        ),
    },
}
# Maximum input length (in tokens) per checkpoint.
SCREAMING_SNAKE_CASE__ : int = {
    """distilbert-base-uncased""": 5_1_2,
    """distilbert-base-uncased-distilled-squad""": 5_1_2,
    """distilbert-base-cased""": 5_1_2,
    """distilbert-base-cased-distilled-squad""": 5_1_2,
    """distilbert-base-german-cased""": 5_1_2,
    """distilbert-base-multilingual-cased""": 5_1_2,
}
# Per-checkpoint tokenizer init overrides (casing behavior).
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
    """distilbert-base-uncased""": {"""do_lower_case""": True},
    """distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True},
    """distilbert-base-cased""": {"""do_lower_case""": False},
    """distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False},
    """distilbert-base-german-cased""": {"""do_lower_case""": False},
    """distilbert-base-multilingual-cased""": {"""do_lower_case""": False},
}
class __lowerCAmelCase ( _UpperCamelCase ):
    """Fast (``tokenizers``-backed) DistilBERT tokenizer.

    Fixes over the generated code: all six class attributes and all three
    methods were bound to one shared name (so only the last survived), and
    every method declared its parameters with the same name (a SyntaxError).
    The canonical fast-tokenizer attribute and method names are restored.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **snake_case,
    ) -> None:
        """Build the fast tokenizer and re-sync the backend normalizer if its
        saved state disagrees with the requested casing/accent options."""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **snake_case , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            # Rebuild the normalizer with the requested options.
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair=None) -> List[int]:
        """Wrap one or two sequences with [CLS] ... [SEP] (... [SEP])."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a_pair:
            output += token_ids_a_pair + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_pair=None) -> List[int]:
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a_pair + sep ) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Save the backend model's vocabulary files and return their paths."""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 629 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger (obfuscated variable naming preserved from the generated file).
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__)
# Checkpoint name -> hosted config.json URL for the known MobileNetV2 weights.
SCREAMING_SNAKE_CASE__ : Any = {
    """google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
    """google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
    """google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
    """google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class __lowerCAmelCase ( _UpperCamelCase ):
    """Configuration holding the hyperparameters of a MobileNetV2 model.

    Fixes over the generated code: the original ``__init__`` declared every
    parameter with the same name (a SyntaxError) and discarded each value
    into a throwaway local instead of storing it on ``self``.
    """

    # Key used by the config machinery to identify the architecture.
    model_type = """mobilenet_v2"""

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **snake_case,
    ) -> None:
        """Store the model hyperparameters; extra kwargs go to the base config.

        Raises:
            ValueError: if ``depth_multiplier`` is not strictly positive.
        """
        super().__init__(**snake_case )
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero." )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class __lowerCAmelCase ( _UpperCamelCase ):
    """ONNX export configuration for MobileNetV2.

    Fix over the generated code: all three properties were named
    ``_snake_case``, so only the last one survived; the canonical
    ``OnnxConfig`` property names are restored.
    """

    # Minimum torch version required to export this architecture.
    torch_onnx_minimum_version = version.parse("""1.11""" )

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Input names and their dynamic axes for export."""
        return OrderedDict([("pixel_values", {0: "batch"})] )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """Output names and dynamic axes; depends on the export task."""
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})] )
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance when validating the exported model."""
        return 1E-4
| 629 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.