| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82 to 53.2k | int64 0 to 721 | stringlengths 91 to 41.9k | int64 0 to 699 | int64 0 to 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine"
def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")
def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
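# Hedged usage sketch (not part of the original module): the same flow the
# `accelerate config` CLI runs can be driven programmatically; the file name
# below is hypothetical.
#
#   parser = config_command_parser()
#   args = parser.parse_args(["--config_file", "my_config.yaml"])
#   config_command(args)   # runs the prompts, then writes my_config.yaml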
| 27
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
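# Illustrative doctest-style check of the two helpers above (editorial addition):
#
#   >>> bytes_to_unicode()[ord("A")]   # printable bytes map to themselves
#   'A'
#   >>> sorted(get_pairs(("h", "e", "l", "l", "o")))
#   [('e', 'l'), ('h', 'e'), ('l', 'l'), ('l', 'o')]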
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
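# Hedged usage sketch (not part of the original file); the exact token ids
# depend on the downloaded vocab/merges, so they are not shown:
#
#   from transformers import BlenderbotTokenizer
#   tok = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
#   ids = tok("Hello world").input_ids
#   text = tok.decode(ids)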
| 118
| 0
|
import math
import unittest
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes > 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
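# Illustrative check of the 6k +/- 1 trial division above: 25 = 5 * 5 fails at
# i = 5, while 29 passes (29 % 5 != 0 and 29 % 7 != 0).
#
#   >>> is_prime(25)
#   False
#   >>> is_prime(29)
#   True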
class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
| 241
|
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
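# Worked example for the check above (editorial addition): [6, 10, 5] sorts to
# [5, 6, 10] and 10 < 5 + 6, so these side lengths can close into a polygon.
#
#   >>> check_polygon([6, 10, 5])
#   True
#   >>> check_polygon([3, 7, 13, 2])   # 13 >= 2 + 3 + 7
#   False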
if __name__ == "__main__":
import doctest
doctest.testmod()
| 241
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
    "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
        "BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BertForMaskedLM",
        "BertForMultipleChoice",
        "BertForNextSentencePrediction",
        "BertForPreTraining",
        "BertForQuestionAnswering",
        "BertForSequenceClassification",
        "BertForTokenClassification",
        "BertLayer",
        "BertLMHeadModel",
        "BertModel",
        "BertPreTrainedModel",
        "load_tf_weights_in_bert",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
        "TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBertEmbeddings",
        "TFBertForMaskedLM",
        "TFBertForMultipleChoice",
        "TFBertForNextSentencePrediction",
        "TFBertForPreTraining",
        "TFBertForQuestionAnswering",
        "TFBertForSequenceClassification",
        "TFBertForTokenClassification",
        "TFBertLMHeadModel",
        "TFBertMainLayer",
        "TFBertModel",
        "TFBertPreTrainedModel",
    ]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
        "FlaxBertForCausalLM",
        "FlaxBertForMaskedLM",
        "FlaxBertForMultipleChoice",
        "FlaxBertForNextSentencePrediction",
        "FlaxBertForPreTraining",
        "FlaxBertForQuestionAnswering",
        "FlaxBertForSequenceClassification",
        "FlaxBertForTokenClassification",
        "FlaxBertModel",
        "FlaxBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
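# Editorial note on the pattern above: the module body only records import
# *names* in `_import_structure`; `_LazyModule` performs the actual submodule
# imports on first attribute access, e.g. (illustrative):
#
#   import transformers.models.bert as bert
#   config_cls = bert.BertConfig   # first access triggers the real import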
| 87
|
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels
    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 87
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 720
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
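# Hedged usage sketch (not part of the original file): the defaults above
# mirror the weiweishi/roc-bert-base-zh architecture sizes.
#
#   config = RoCBertConfig()
#   assert config.vocab_size == 30522 and config.shape_vocab_size == 24858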
| 170
| 0
|
"""simple docstring"""
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict

    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False

    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 196
|
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds KeyHandler metaclass to the class"""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
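# Illustrative sketch of how the pieces above combine (class and method names
# below are hypothetical, not from the original module):
#
#   @register
#   class Menu:
#       @mark(ord("q"))
#       def quit(cls):
#           return "quit"
#
#   # Menu.handle_input(Menu) reads one key via get_character() and dispatches
#   # to Menu.quit when "q" is pressed.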
| 196
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 714
|
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)
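# Illustrative check of the trial-division loop above: 13195 = 5 * 7 * 13 * 29,
# so its largest prime factor is 29.
#
#   >>> solution(13195)
#   29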
if __name__ == "__main__":
    print(f"{solution() = }")
| 408
| 0
|
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 485
|
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(__lowercase )
class A_ ( __lowercase ):
'''simple docstring'''
def __init__( self , **_A) -> List[str]:
"""simple docstring"""
super().__init__(**_A)
requires_backends(self , '''vision''')
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
def __call__( self , _A , **_A) -> str:
"""simple docstring"""
return super().__call__(_A , **_A)
def snake_case__ ( self , **_A) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = {}
if "candidate_labels" in kwargs:
_UpperCAmelCase : Optional[int] = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
_UpperCAmelCase : str = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def snake_case__ ( self , _A , _A=None , _A="This is a photo of {}.") -> Any:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = load_image(_A)
_UpperCAmelCase : List[Any] = self.image_processor(images=[image] , return_tensors=self.framework)
_UpperCAmelCase : Dict = candidate_labels
        _UpperCAmelCase : int = [hypothesis_template.format(x) for x in candidate_labels]
_UpperCAmelCase : Any = self.tokenizer(_A , return_tensors=self.framework , padding=_A)
_UpperCAmelCase : int = [text_inputs]
return inputs
def snake_case__ ( self , _A) -> str:
"""simple docstring"""
_UpperCAmelCase : int = model_inputs.pop('''candidate_labels''')
_UpperCAmelCase : Optional[Any] = model_inputs.pop('''text_inputs''')
if isinstance(text_inputs[0] , _A):
_UpperCAmelCase : Union[str, Any] = text_inputs[0]
else:
# Batching case.
_UpperCAmelCase : Union[str, Any] = text_inputs[0][0]
_UpperCAmelCase : Any = self.model(**_A , **_A)
_UpperCAmelCase : Optional[Any] = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def snake_case__ ( self , _A) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : List[Any] = model_outputs.pop('''candidate_labels''')
_UpperCAmelCase : List[Any] = model_outputs['''logits'''][0]
if self.framework == "pt":
_UpperCAmelCase : List[str] = logits.softmax(dim=-1).squeeze(-1)
_UpperCAmelCase : List[Any] = probs.tolist()
if not isinstance(_A , _A):
_UpperCAmelCase : List[Any] = [scores]
elif self.framework == "tf":
_UpperCAmelCase : Optional[Any] = stable_softmax(_A , axis=-1)
_UpperCAmelCase : int = probs.numpy().tolist()
else:
raise ValueError(f'''Unsupported framework: {self.framework}''')
_UpperCAmelCase : str = [
{'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(_A , _A) , key=lambda x: -x[0])
]
return result
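# Hedged usage sketch for the zero-shot image-classification pipeline defined
# above; the checkpoint name and image path are assumptions, not taken from
# this file.
# from transformers import pipeline
# classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
# classifier("cat.png", candidate_labels=["cat", "dog", "car"])
# -> a list of {"score": float, "label": str} dicts sorted by descending score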
| 485
| 1
|
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__lowercase : Tuple = logging.get_logger(__name__)
__lowercase : Dict = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
__lowercase : Union[str, Any] = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def lowerCamelCase_ ( _lowerCamelCase : int , _lowerCamelCase : Any , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str ):
for attribute in key.split('''.''' ):
lowerCamelCase_ = getattr(_lowerCamelCase , _lowerCamelCase )
if weight_type is not None:
lowerCamelCase_ = getattr(_lowerCamelCase , _lowerCamelCase ).shape
else:
lowerCamelCase_ = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
lowerCamelCase_ = value
elif weight_type == "weight_g":
lowerCamelCase_ = value
elif weight_type == "weight_v":
lowerCamelCase_ = value
elif weight_type == "bias":
lowerCamelCase_ = value
else:
lowerCamelCase_ = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowerCamelCase_ ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] ):
lowerCamelCase_ = []
lowerCamelCase_ = fairseq_model.state_dict()
lowerCamelCase_ = hf_model.feature_extractor
lowerCamelCase_ = hf_model.adapter
for name, value in fairseq_dict.items():
lowerCamelCase_ = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == '''group''' , )
lowerCamelCase_ = True
elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.'''] ):
load_adapter(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
lowerCamelCase_ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
lowerCamelCase_ = True
if "*" in mapped_key:
lowerCamelCase_ = name.split(_lowerCamelCase )[0].split('''.''' )[-2]
lowerCamelCase_ = mapped_key.replace('''*''' , _lowerCamelCase )
if "weight_g" in name:
lowerCamelCase_ = '''weight_g'''
elif "weight_v" in name:
lowerCamelCase_ = '''weight_v'''
elif "bias" in name:
lowerCamelCase_ = '''bias'''
elif "weight" in name:
lowerCamelCase_ = '''weight'''
else:
lowerCamelCase_ = None
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
continue
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCamelCase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str ):
lowerCamelCase_ = full_name.split('''conv_layers.''' )[-1]
lowerCamelCase_ = name.split('''.''' )
lowerCamelCase_ = int(items[0] )
lowerCamelCase_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
lowerCamelCase_ = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
lowerCamelCase_ = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
lowerCamelCase_ = value
            logger.info(F"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
)
lowerCamelCase_ = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase : Any , _lowerCamelCase : str , _lowerCamelCase : Dict , _lowerCamelCase : Any ):
lowerCamelCase_ = full_name.split('''adaptor.''' )[-1]
lowerCamelCase_ = name.split('''.''' )
if items[1].isdigit():
lowerCamelCase_ = int(items[1] )
else:
lowerCamelCase_ = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."""
lowerCamelCase_ = value
logger.info(F"""Adapter proj layer norm bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."""
lowerCamelCase_ = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."""
lowerCamelCase_ = value
logger.info(F"""Adapter proj layer bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."""
lowerCamelCase_ = value
logger.info(F"""Adapter proj layer weight was initialized from {full_name}.""" )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."""
lowerCamelCase_ = value
logger.info(F"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."""
lowerCamelCase_ = value
            logger.info(F"""Adapter layer {layer_id} weight was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase : List[str] ):
lowerCamelCase_ , lowerCamelCase_ = emb.weight.shape
lowerCamelCase_ = nn.Linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase )
lowerCamelCase_ = emb.weight.data
return lin_layer
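# A minimal sketch of the weight tying performed by the helper above (sizes
# are illustrative): the (vocab, hidden) embedding matrix is reused as the
# weight of a bias-free hidden -> vocab projection.
_demo_emb = nn.Embedding(10, 4)               # vocab=10, hidden=4
_demo_lm_head = nn.Linear(4, 10, bias=False)  # weight shape (10, 4) matches the embedding
_demo_lm_head.weight.data = _demo_emb.weight.data
assert torch.equal(_demo_lm_head.weight, _demo_emb.weight)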
@torch.no_grad()
def lowerCamelCase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int , _lowerCamelCase : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] , ):
lowerCamelCase_ = WavaVecaConfig.from_pretrained(
_lowerCamelCase , add_adapter=_lowerCamelCase , adapter_stride=_lowerCamelCase , adapter_kernel_size=_lowerCamelCase , use_auth_token=_lowerCamelCase , output_hidden_size=_lowerCamelCase , )
lowerCamelCase_ = MBartConfig.from_pretrained(_lowerCamelCase )
# load model
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
'''config_yaml''': config_yaml_path,
'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ),
'''w2v_path''': checkpoint_path,
'''load_pretrained_decoder_from''': None,
} , )
lowerCamelCase_ = model[0].eval()
# load feature extractor
lowerCamelCase_ = WavaVecaFeatureExtractor.from_pretrained(_lowerCamelCase , use_auth_token=_lowerCamelCase )
# set weights for wav2vec2 encoder
lowerCamelCase_ = WavaVecaModel(_lowerCamelCase )
recursively_load_weights_wavaveca(model.encoder , _lowerCamelCase )
# load decoder weights
lowerCamelCase_ = MBartForCausalLM(_lowerCamelCase )
lowerCamelCase_ , lowerCamelCase_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_lowerCamelCase )
logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
lowerCamelCase_ = SpeechEncoderDecoderModel(encoder=_lowerCamelCase , decoder=_lowerCamelCase )
lowerCamelCase_ = False
lowerCamelCase_ = MBartaaTokenizer(_lowerCamelCase )
tokenizer.save_pretrained(_lowerCamelCase )
lowerCamelCase_ = hf_wavavec.config.to_dict()
lowerCamelCase_ = tokenizer.pad_token_id
lowerCamelCase_ = tokenizer.bos_token_id
lowerCamelCase_ = tokenizer.eos_token_id
lowerCamelCase_ = '''mbart50'''
lowerCamelCase_ = '''wav2vec2'''
lowerCamelCase_ = tokenizer.eos_token_id
lowerCamelCase_ = 2_5_0_0_0_4
lowerCamelCase_ = tokenizer.eos_token_id
lowerCamelCase_ = SpeechEncoderDecoderConfig.from_dict(_lowerCamelCase )
hf_wavavec.save_pretrained(_lowerCamelCase )
feature_extractor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__lowercase : Optional[int] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1_0_2_4, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=2_5_0_0_0_4, type=int, help="""`decoder_start_token_id` of model config""")
__lowercase : Optional[Any] = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
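# Example invocation of the conversion script above; every path is a
# placeholder and the script file name is illustrative.
# python convert_wav2vec2_mbart_checkpoint.py \
#     --checkpoint_path /path/to/fairseq/checkpoint.pt \
#     --dict_path /path/to/dict \
#     --config_yaml_path /path/to/config.yaml \
#     --pytorch_dump_folder_path ./converted-model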
| 66
|
"""simple docstring"""
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
__lowercase :Tuple = JukeboxTokenizer
__lowercase :Optional[Any] = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
@require_torch
def _lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
import torch
lowerCamelCase_ = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' )
lowerCamelCase_ = tokenizer(**self.metas )['''input_ids''']
# fmt: off
lowerCamelCase_ = [
torch.tensor([[
0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def _lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
import torch
lowerCamelCase_ = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' )
lowerCamelCase_ = tokenizer(**self.metas )['''input_ids''']
# fmt: off
lowerCamelCase_ = [
torch.tensor([[
0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
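# Hedged usage sketch of the tokenizer exercised above: it maps the metadata
# dict to one token sequence per prior level, which is why the tests index
# tokens[0..2]. The checkpoint name matches the first test.
# from transformers import JukeboxTokenizer
# tok = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
# ids = tok(artist="Zac Brown Band", genres="Country", lyrics="...")["input_ids"]
# assert len(ids) == 3  # one tensor per prior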
| 66
| 1
|
from __future__ import annotations
from fractions import Fraction
def __lowerCAmelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> bool:
return (
num != den and num % 1_0 == den // 1_0 and (num // 1_0) / (den % 1_0) == num / den
)
def __lowerCAmelCase ( UpperCAmelCase__ : int ) -> list[str]:
lowerCamelCase_ = []
lowerCamelCase_ = 1_1
lowerCamelCase_ = int("""1""" + """0""" * digit_len )
for num in range(UpperCAmelCase__ , UpperCAmelCase__ ):
while den <= 9_9:
if (num != den) and (num % 1_0 == den // 1_0) and (den % 1_0 != 0):
if is_digit_cancelling(UpperCAmelCase__ , UpperCAmelCase__ ):
solutions.append(F'''{num}/{den}''' )
den += 1
num += 1
lowerCamelCase_ = 1_0
return solutions
def __lowerCAmelCase ( UpperCAmelCase__ : int = 2 ) -> int:
lowerCamelCase_ = 1.0
for fraction in fraction_list(UpperCAmelCase__ ):
lowerCamelCase_ = Fraction(UpperCAmelCase__ )
result *= frac.denominator / frac.numerator
return int(UpperCAmelCase__ )
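# Sanity check (hedged, using the `solution` name from the __main__ block
# below): the four non-trivial digit-cancelling fractions are 16/64, 19/95,
# 26/65 and 49/98, whose product reduces to 1/100, so solution() returns 100.
# assert solution() == 100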
if __name__ == "__main__":
print(solution())
| 272
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __A( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
SCREAMING_SNAKE_CASE = StableDiffusionInstructPixaPixPipeline
SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE = IMAGE_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowercase__ ( self : List[Any] ):
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
lowerCamelCase_ = PNDMScheduler(skip_prk_steps=__UpperCamelCase )
torch.manual_seed(0 )
lowerCamelCase_ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
lowerCamelCase_ = CLIPTextModel(__UpperCamelCase )
lowerCamelCase_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCamelCase_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowercase__ ( self : Optional[int] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Any=0 ):
lowerCamelCase_ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase_ = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert("""RGB""" )
if str(__UpperCamelCase ).startswith("""mps""" ):
lowerCamelCase_ = torch.manual_seed(__UpperCamelCase )
else:
lowerCamelCase_ = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
lowerCamelCase_ = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""image_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def lowercase__ ( self : Dict ):
lowerCamelCase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
lowerCamelCase_ = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowerCamelCase_ = self.get_dummy_inputs(__UpperCamelCase )
lowerCamelCase_ = sd_pipe(**__UpperCamelCase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCamelCase_ = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowercase__ ( self : Dict ):
lowerCamelCase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
lowerCamelCase_ = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowerCamelCase_ = self.get_dummy_inputs(__UpperCamelCase )
lowerCamelCase_ = """french fries"""
lowerCamelCase_ = sd_pipe(**__UpperCamelCase , negative_prompt=__UpperCamelCase )
lowerCamelCase_ = output.images
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCamelCase_ = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowercase__ ( self : Optional[int] ):
lowerCamelCase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
lowerCamelCase_ = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowerCamelCase_ = self.get_dummy_inputs(__UpperCamelCase )
lowerCamelCase_ = [inputs["""prompt"""]] * 2
lowerCamelCase_ = np.array(inputs["""image"""] ).astype(np.floataa ) / 255.0
lowerCamelCase_ = torch.from_numpy(__UpperCamelCase ).unsqueeze(0 ).to(__UpperCamelCase )
lowerCamelCase_ = image / 2 + 0.5
lowerCamelCase_ = image.permute(0 , 3 , 1 , 2 )
lowerCamelCase_ = image.repeat(2 , 1 , 1 , 1 )
lowerCamelCase_ = sd_pipe(**__UpperCamelCase ).images
lowerCamelCase_ = image[-1, -3:, -3:, -1]
assert image.shape == (2, 3_2, 3_2, 3)
lowerCamelCase_ = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowercase__ ( self : Dict ):
lowerCamelCase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
lowerCamelCase_ = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowerCamelCase_ = self.get_dummy_inputs(__UpperCamelCase )
lowerCamelCase_ = sd_pipe(**__UpperCamelCase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1]
        lowerCamelCase_ = [round(x , 4 ) for x in image_slice.flatten().tolist()]
        print(""",""".join([str(x ) for x in slice] ) )
assert image.shape == (1, 3_2, 3_2, 3)
lowerCamelCase_ = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowercase__ ( self : str ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def lowercase__ ( self : Optional[int] ):
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
lowerCamelCase_ = VaeImageProcessor(do_resize=__UpperCamelCase , do_normalize=__UpperCamelCase )
lowerCamelCase_ = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowerCamelCase_ = pipe(**self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type="""pt""" ) )[0]
lowerCamelCase_ = components["""vae"""]
lowerCamelCase_ = self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type="""pt""" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
lowerCamelCase_ = vae.encode(inputs[image_param] ).latent_dist.mode()
lowerCamelCase_ = pipe(**__UpperCamelCase )[0]
lowerCamelCase_ = np.abs(out - out_latents_inputs ).max()
self.assertLess(__UpperCamelCase , 1E-4 , """passing latents as image input generate different result from passing image""" )
@slow
@require_torch_gpu
class __A( unittest.TestCase ):
def lowercase__ ( self : List[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : List[str] , __UpperCamelCase : Union[str, Any]=0 ):
lowerCamelCase_ = torch.manual_seed(__UpperCamelCase )
lowerCamelCase_ = load_image(
"""https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
lowerCamelCase_ = {
"""prompt""": """turn him into a cyborg""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""image_guidance_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def lowercase__ ( self : Union[str, Any] ):
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__UpperCamelCase )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ = self.get_inputs()
lowerCamelCase_ = pipe(**__UpperCamelCase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase_ = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowercase__ ( self : List[str] ):
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__UpperCamelCase )
lowerCamelCase_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ = self.get_inputs()
lowerCamelCase_ = pipe(**__UpperCamelCase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase_ = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowercase__ ( self : List[Any] ):
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__UpperCamelCase )
lowerCamelCase_ = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ = self.get_inputs()
lowerCamelCase_ = pipe(**__UpperCamelCase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase_ = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowercase__ ( self : List[str] ):
lowerCamelCase_ = 0
def callback_fn(__UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : torch.FloatTensor ) -> None:
lowerCamelCase_ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowerCamelCase_ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
lowerCamelCase_ = latents[0, -3:, -3:, -1]
lowerCamelCase_ = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
lowerCamelCase_ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
lowerCamelCase_ = latents[0, -3:, -3:, -1]
lowerCamelCase_ = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
lowerCamelCase_ = False
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa )
lowerCamelCase_ = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ = self.get_inputs()
pipe(**__UpperCamelCase , callback=__UpperCamelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowercase__ ( self : int ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa )
lowerCamelCase_ = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCamelCase_ = self.get_inputs()
lowerCamelCase_ = pipe(**__UpperCamelCase )
lowerCamelCase_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
def lowercase__ ( self : Tuple ):
lowerCamelCase_ = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCamelCase_ = inputs["""image"""].resize((5_0_4, 5_0_4) )
lowerCamelCase_ = """timbrooks/instruct-pix2pix"""
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
__UpperCamelCase , safety_checker=__UpperCamelCase , )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ = pipe(**__UpperCamelCase )
lowerCamelCase_ = output.images[0]
lowerCamelCase_ = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
lowerCamelCase_ = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
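# Hedged usage sketch of the pipeline exercised above; the checkpoint name
# matches the tests, the input image is a placeholder, and the class is
# spelled StableDiffusionInstructPix2PixPipeline in diffusers itself.
# import torch
# from diffusers import StableDiffusionInstructPix2PixPipeline
# pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
#     "timbrooks/instruct-pix2pix", torch_dtype=torch.float16
# ).to("cuda")
# edited = pipe("turn him into a cyborg", image=input_image).images[0]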
| 272
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_SCREAMING_SNAKE_CASE = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['''OwlViTFeatureExtractor''']
_SCREAMING_SNAKE_CASE = ['''OwlViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
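# Sketch of the lazy-import behaviour configured above (illustrative, not part
# of this module): importing the package is cheap because _LazyModule defers
# the heavy torch/vision imports until an exported name is first accessed.
# import transformers
# config = transformers.OwlViTConfig()  # attribute access triggers the real import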
| 56
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 56
| 1
|
'''simple docstring'''
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def UpperCAmelCase_ (__a : List[Any] , __a : str ):
"""simple docstring"""
_a : str = torch.load(__a , map_location='cpu' )
_a : Optional[Any] = chkpt['''model''']
# We have the base model one level deeper than the original XLM repository
_a : Dict = {}
for k, v in state_dict.items():
if "pred_layer" in k:
            _a[k] = v
else:
            _a['''transformer.''' + k] = v
_a : Any = chkpt['''params''']
    _a : Tuple = {n: v for n, v in config.items() if not isinstance(v , (torch.FloatTensor, numpy.ndarray) )}
_a : Union[str, Any] = chkpt['''dico_word2id''']
_a : Optional[int] = {s + '''</w>''' if s.find('@@' ) == -1 and i > 1_3 else s.replace('@@' , '' ): i for s, i in vocab.items()}
# Save pytorch-model
_a : Optional[Any] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
_a : Tuple = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
_a : List[str] = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''vocab_file''']
print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(__a , __a )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(__a , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(__a , indent=2 ) + '\n' )
    print(f"""Save vocab file to {pytorch_vocab_dump_path}""" )
with open(__a , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(__a , indent=2 ) + '\n' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__lowerCAmelCase = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
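# Example invocation of the script above; both paths are placeholders and the
# script file name is illustrative.
# python convert_xlm_original_checkpoint_to_pytorch.py \
#     --xlm_checkpoint_path /path/to/xlm_checkpoint.pth \
#     --pytorch_dump_folder_path ./xlm-converted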
| 229
|
"""simple docstring"""
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[str]=() , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : Dict="no" , SCREAMING_SNAKE_CASE : int="29500" ):
'''simple docstring'''
__lowerCamelCase : Optional[int] =False
__lowerCamelCase : Tuple =False
if any(key.startswith('''KAGGLE''' ) for key in os.environ.keys() ):
__lowerCamelCase : Optional[int] =True
elif "IPython" in sys.modules:
__lowerCamelCase : int ='''google.colab''' in str(sys.modules['''IPython'''].get_ipython() )
try:
__lowerCamelCase : str =PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
            F'Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.' )
if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''' , SCREAMING_SNAKE_CASE ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
'''your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if num_processes is None:
__lowerCamelCase : int =8
__lowerCamelCase : Optional[Any] =PrepareForLaunch(SCREAMING_SNAKE_CASE , distributed_type='''TPU''' )
print(F'Launching a training on {num_processes} TPU cores.' )
xmp.spawn(SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , nprocs=SCREAMING_SNAKE_CASE , start_method='''fork''' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on one CPU.''' )
function(*SCREAMING_SNAKE_CASE )
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
'''inside your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
            world_size=SCREAMING_SNAKE_CASE , master_addr='''127.0.0.1''' , master_port=SCREAMING_SNAKE_CASE , mixed_precision=SCREAMING_SNAKE_CASE ):
__lowerCamelCase : List[str] =PrepareForLaunch(SCREAMING_SNAKE_CASE , distributed_type='''MULTI_GPU''' )
print(F'Launching training on {num_processes} GPUs.' )
try:
start_processes(SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , nprocs=SCREAMING_SNAKE_CASE , start_method='''fork''' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
            os.environ['''PYTORCH_ENABLE_MPS_FALLBACK'''] = '''1'''
print('''Launching training on MPS.''' )
elif torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on CPU.''' )
function(*SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[int]=() , SCREAMING_SNAKE_CASE : Optional[Any]=2 ):
'''simple docstring'''
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
            world_size=SCREAMING_SNAKE_CASE , master_addr='''127.0.0.1''' , master_port='''29500''' , accelerate_mixed_precision='''no''' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='''yes''' , ):
__lowerCamelCase : Tuple =PrepareForLaunch(SCREAMING_SNAKE_CASE , debug=SCREAMING_SNAKE_CASE )
start_processes(SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , nprocs=SCREAMING_SNAKE_CASE , start_method='''fork''' )
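# Hedged usage sketch for notebook_launcher above, as it would appear in a
# notebook cell; training_function is a user-defined callable.
# from accelerate import notebook_launcher
# def training_function():
#     ...  # construct the Accelerator() *inside* this function, as enforced above
# notebook_launcher(training_function, args=(), num_processes=2)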
| 179
| 0
|
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
lowerCamelCase__ = numpy.array([0, 0])
lowerCamelCase__ = numpy.array([0.5, 0.8_660_254])
lowerCamelCase__ = numpy.array([1, 0])
lowerCamelCase__ = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def A(__a: list[numpy.ndarray] , __a: int ):
lowerCAmelCase_ = initial_vectors
for _ in range(__a ):
lowerCAmelCase_ = iteration_step(__a )
return vectors
def A(__a: list[numpy.ndarray] ):
lowerCAmelCase_ = []
for i, start_vector in enumerate(vectors[:-1] ):
lowerCAmelCase_ = vectors[i + 1]
new_vectors.append(__a )
lowerCAmelCase_ = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def A(__a: numpy.ndarray , __a: float ):
lowerCAmelCase_ = numpy.radians(__a )
lowerCAmelCase_ , lowerCAmelCase_ = numpy.cos(__a ), numpy.sin(__a )
lowerCAmelCase_ = numpy.array(((c, -s), (s, c)) )
return numpy.dot(__a , __a )
def A(__a: list[numpy.ndarray] ):
lowerCAmelCase_ = plt.gca()
axes.set_aspect("equal" )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
lowerCAmelCase_ , lowerCAmelCase_ = zip(*__a )
plt.plot(__a , __a )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
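# Quick numeric check of the rotate() helper above (hedged sketch): a
# 90-degree rotation of (1, 0) gives (0, 1) up to floating-point error.
# numpy.allclose(rotate(numpy.array([1, 0]), 90), numpy.array([0, 1]))  # True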
| 715
|
def A(__a: int ):
lowerCAmelCase_ = abs(__a )
lowerCAmelCase_ = 0
while n > 0:
res += n % 10
n //= 10
return res
def A(__a: int ):
lowerCAmelCase_ = abs(__a )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def A(__a: int ):
return sum(int(__a ) for c in str(abs(__a ) ) )
def A():
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(__a: Callable , __a: int ) -> None:
lowerCAmelCase_ = F"{func.__name__}({value})"
lowerCAmelCase_ = timeit(F"__main__.{call}" , setup="import __main__" )
print(F"{call:56} = {func(__a )} -- {timing:.4f} seconds" )
for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(__a , __a )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
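# Spot checks for the three equivalent implementations above (hedged sketch,
# using the names referenced by the benchmark loop): 262144 -> 2+6+2+1+4+4 = 19.
# assert sum_of_digits(262_144) == 19
# assert sum_of_digits_recursion(262_144) == 19
# assert sum_of_digits_compact(262_144) == 19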
| 226
| 0
|
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
__A : Dict = get_logger(__name__)
__A : Tuple = r'\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n'
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
@add_start_docstrings(__lowerCamelCase )
def __call__( self : Union[str, Any] , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray ):
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
@add_start_docstrings(__lowerCamelCase )
def __call__( self : List[Any] , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray ):
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
@add_start_docstrings(__lowerCamelCase )
def __call__( self : Optional[Any] , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : int , **__lowerCamelCase : Optional[Any] ):
for processor in self:
SCREAMING_SNAKE_CASE = inspect.signature(processor.__call__ ).parameters
if len(__lowerCamelCase ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
f"Make sure that all the required parameters: {list(function_args.keys() )} for "
f"{processor.__class__} are passed to the logits processor." )
SCREAMING_SNAKE_CASE = processor(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE = processor(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return scores
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
def __init__( self : Tuple , __lowerCamelCase : float ):
if not isinstance(__lowerCamelCase , __lowerCamelCase ) or not (temperature > 0):
raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}" )
SCREAMING_SNAKE_CASE = temperature
def __call__( self : Any , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : int ):
SCREAMING_SNAKE_CASE = scores / self.temperature
return scores
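# Temperature scaling in miniature (values rounded; jax and jnp are imported
# at the top of this file): dividing logits by T > 1 flattens the
# distribution, while T < 1 sharpens it.
# jax.nn.softmax(jnp.array([2.0, 0.0]))        # ~[0.88, 0.12]
# jax.nn.softmax(jnp.array([2.0, 0.0]) / 2.0)  # ~[0.73, 0.27]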
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
def __init__( self : Optional[int] , __lowerCamelCase : float , __lowerCamelCase : float = -float("Inf" ) , __lowerCamelCase : int = 1 ):
if not isinstance(__lowerCamelCase , __lowerCamelCase ) or (top_p < 0 or top_p > 1.0):
raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}" )
if not isinstance(__lowerCamelCase , __lowerCamelCase ) or (min_tokens_to_keep < 1):
raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}" )
SCREAMING_SNAKE_CASE = top_p
SCREAMING_SNAKE_CASE = filter_value
SCREAMING_SNAKE_CASE = min_tokens_to_keep
def __call__( self : Tuple , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : int ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = lax.top_k(__lowerCamelCase , scores.shape[-1] )
SCREAMING_SNAKE_CASE = jnp.full_like(__lowerCamelCase , self.filter_value )
SCREAMING_SNAKE_CASE = jax.nn.softmax(__lowerCamelCase , axis=-1 ).cumsum(axis=-1 )
SCREAMING_SNAKE_CASE = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
SCREAMING_SNAKE_CASE = jnp.roll(__lowerCamelCase , 1 )
score_mask |= score_mask.at[:, 0].set(__lowerCamelCase )
# min tokens to keep
SCREAMING_SNAKE_CASE = score_mask.at[:, : self.min_tokens_to_keep].set(__lowerCamelCase )
SCREAMING_SNAKE_CASE = jnp.where(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = jax.lax.sort_key_val(__lowerCamelCase , __lowerCamelCase )[-1]
return next_scores
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
def __init__( self : List[Any] , __lowerCamelCase : int , __lowerCamelCase : float = -float("Inf" ) , __lowerCamelCase : int = 1 ):
if not isinstance(__lowerCamelCase , __lowerCamelCase ) or top_k <= 0:
raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}" )
SCREAMING_SNAKE_CASE = max(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = filter_value
def __call__( self : Union[str, Any] , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : int ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = scores.shape
SCREAMING_SNAKE_CASE = jnp.full(batch_size * vocab_size , self.filter_value )
SCREAMING_SNAKE_CASE = min(self.top_k , scores.shape[-1] ) # Safety check
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = lax.top_k(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = jnp.broadcast_to((jnp.arange(__lowerCamelCase ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
SCREAMING_SNAKE_CASE = topk_scores.flatten()
SCREAMING_SNAKE_CASE = topk_indices.flatten() + shift
SCREAMING_SNAKE_CASE = next_scores_flat.at[topk_indices_flat].set(__lowerCamelCase )
SCREAMING_SNAKE_CASE = next_scores_flat.reshape(__lowerCamelCase , __lowerCamelCase )
return next_scores
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
def __init__( self : Optional[Any] , __lowerCamelCase : int ):
SCREAMING_SNAKE_CASE = bos_token_id
def __call__( self : str , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : int ):
SCREAMING_SNAKE_CASE = jnp.full(scores.shape , -float("inf" ) )
SCREAMING_SNAKE_CASE = 1 - jnp.bool_(cur_len - 1 )
SCREAMING_SNAKE_CASE = jnp.where(__lowerCamelCase , new_scores.at[:, self.bos_token_id].set(0 ) , __lowerCamelCase )
return scores
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
def __init__( self : int , __lowerCamelCase : int , __lowerCamelCase : int ):
SCREAMING_SNAKE_CASE = max_length
SCREAMING_SNAKE_CASE = eos_token_id
def __call__( self : List[str] , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : int ):
SCREAMING_SNAKE_CASE = jnp.full(scores.shape , -float("inf" ) )
SCREAMING_SNAKE_CASE = 1 - jnp.bool_(cur_len - self.max_length + 1 )
SCREAMING_SNAKE_CASE = jnp.where(__lowerCamelCase , new_scores.at[:, self.eos_token_id].set(0 ) , __lowerCamelCase )
return scores
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : int ):
if not isinstance(__lowerCamelCase , __lowerCamelCase ) or min_length < 0:
raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}" )
if not isinstance(__lowerCamelCase , __lowerCamelCase ) or eos_token_id < 0:
raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}" )
SCREAMING_SNAKE_CASE = min_length
SCREAMING_SNAKE_CASE = eos_token_id
def __call__( self : List[str] , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : int ):
# create boolean flag to decide if min length penalty should be applied
SCREAMING_SNAKE_CASE = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
SCREAMING_SNAKE_CASE = jnp.where(__lowerCamelCase , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , __lowerCamelCase )
return scores
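# --- illustrative sketch, not part of the original file ---
# The min-length rule above in isolation, assuming `jax` is installed; the
# ids and shapes are hypothetical. While cur_len < min_length the EOS logit
# is held at -inf so generation cannot terminate early.
import jax.numpy as jnp

min_length, eos_token_id, cur_len = 5, 2, 3
scores = jnp.zeros((1, 4))
apply_penalty = 1 - jnp.clip(cur_len - min_length, 0, 1)  # 1 while too short
scores = jnp.where(apply_penalty, scores.at[:, eos_token_id].set(-jnp.inf), scores)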
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
def __init__( self : str , __lowerCamelCase : List[str] , __lowerCamelCase : int ):
SCREAMING_SNAKE_CASE = list(__lowerCamelCase )
SCREAMING_SNAKE_CASE = begin_index
def __call__( self : Any , __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : int ):
SCREAMING_SNAKE_CASE = 1 - jnp.bool_(cur_len - self.begin_index )
SCREAMING_SNAKE_CASE = jnp.where(__lowerCamelCase , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , __lowerCamelCase )
return scores
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
def __init__( self : Optional[Any] , __lowerCamelCase : list ):
SCREAMING_SNAKE_CASE = list(__lowerCamelCase )
def __call__( self : List[str] , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : int ):
SCREAMING_SNAKE_CASE = scores.at[..., self.suppress_tokens].set(-float("inf" ) )
return scores
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
def __init__( self : int , __lowerCamelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE = dict(__lowerCamelCase )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
        SCREAMING_SNAKE_CASE = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.int32 ) * -1
for index, token in force_token_map.items():
if token is not None:
SCREAMING_SNAKE_CASE = force_token_array.at[index].set(__lowerCamelCase )
        SCREAMING_SNAKE_CASE = jnp.int32(__lowerCamelCase )
def __call__( self : str , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : int ):
def _force_token(__lowerCamelCase : str ):
SCREAMING_SNAKE_CASE = scores.shape[0]
SCREAMING_SNAKE_CASE = self.force_token_array[generation_idx]
SCREAMING_SNAKE_CASE = jnp.ones_like(__lowerCamelCase , dtype=scores.dtype ) * -float("inf" )
SCREAMING_SNAKE_CASE = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
SCREAMING_SNAKE_CASE = lax.dynamic_update_slice(__lowerCamelCase , __lowerCamelCase , (0, current_token) )
return new_scores
SCREAMING_SNAKE_CASE = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(__lowerCamelCase ) , lambda: scores , ) , )
return scores
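# --- illustrative sketch, not part of the original file ---
# The XLA-friendly branching used above: `lax.cond` selects one of two traced
# branches at run time without Python control flow. Values are hypothetical;
# -1 marks generation steps that have no forced token.
import jax.numpy as jnp
from jax import lax

force_token_array = jnp.array([50258, -1, 50359])
cur_len = 0
out = lax.cond(
    force_token_array[cur_len] >= 0,
    lambda: jnp.array(1.0),  # a token would be forced at this step
    lambda: jnp.array(0.0),  # scores would pass through unchanged
)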
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = generate_config.eos_token_id
SCREAMING_SNAKE_CASE = generate_config.no_timestamps_token_id
SCREAMING_SNAKE_CASE = generate_config.no_timestamps_token_id + 1
SCREAMING_SNAKE_CASE = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(__lowerCamelCase , "max_initial_timestamp_index" ):
SCREAMING_SNAKE_CASE = generate_config.max_initial_timestamp_index
else:
SCREAMING_SNAKE_CASE = model_config.vocab_size
if self.max_initial_timestamp_index is None:
SCREAMING_SNAKE_CASE = model_config.vocab_size
def __call__( self : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : int ):
# suppress <|notimestamps|> which is handled by without_timestamps
SCREAMING_SNAKE_CASE = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )
def handle_pairs(__lowerCamelCase : Tuple , __lowerCamelCase : List[str] ):
SCREAMING_SNAKE_CASE = jnp.where((cur_len - self.begin_index) >= 1 , __lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , __lowerCamelCase , )
SCREAMING_SNAKE_CASE = jnp.where((cur_len - self.begin_index) < 2 , __lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , __lowerCamelCase , __lowerCamelCase , )
return jnp.where(
__lowerCamelCase , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , __lowerCamelCase , )
SCREAMING_SNAKE_CASE = jax.vmap(__lowerCamelCase )(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = jnp.where(cur_len == self.begin_index , __lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , __lowerCamelCase , )
SCREAMING_SNAKE_CASE = self.timestamp_begin + self.max_initial_timestamp_index
SCREAMING_SNAKE_CASE = jnp.where(
__lowerCamelCase , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , __lowerCamelCase , )
# if sum of probability over timestamps is above any other token, sample timestamp
SCREAMING_SNAKE_CASE = jax.nn.log_softmax(__lowerCamelCase , axis=-1 )
def handle_cumulative_probs(__lowerCamelCase : Tuple , __lowerCamelCase : List[str] ):
SCREAMING_SNAKE_CASE = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
SCREAMING_SNAKE_CASE = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , __lowerCamelCase , )
SCREAMING_SNAKE_CASE = jax.vmap(__lowerCamelCase )(__lowerCamelCase , __lowerCamelCase )
return scores
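# --- illustrative sketch, not part of the original file ---
# The final rule above in isolation, assuming `jax` is installed: when the
# total log-probability mass on timestamp tokens beats the best text token,
# text tokens are masked so a timestamp must be sampled. Values hypothetical.
import jax
import jax.numpy as jnp

logprobs = jnp.log(jnp.array([0.2, 0.1, 0.4, 0.3]))  # last two are timestamps
timestamp_begin = 2
timestamp_mass = jax.nn.logsumexp(logprobs[timestamp_begin:])  # log(0.7)
best_text = jnp.max(logprobs[:timestamp_begin])                # log(0.2)
out = jnp.where(timestamp_mass > best_text,
                logprobs.at[:timestamp_begin].set(-jnp.inf), logprobs)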
| 16
|
'''simple docstring'''
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
def __lowercase ( self : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int ):
"""simple docstring"""
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) )
for a, b in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertAlmostEqual(lowerCAmelCase__ , lowerCAmelCase__ , delta=lowerCAmelCase__ )
def __lowercase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(lowerCAmelCase__ ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )
def __lowercase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = None
ops.enable_eager_execution_internal()
SCREAMING_SNAKE_CASE : List[Any] = tf.config.list_physical_devices('''CPU''' )
if len(lowerCAmelCase__ ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
SCREAMING_SNAKE_CASE : Tuple = tf.config.list_logical_devices(device_type='''CPU''' )
SCREAMING_SNAKE_CASE : List[Any] = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
SCREAMING_SNAKE_CASE : Optional[Any] = GradientAccumulator()
SCREAMING_SNAKE_CASE : Dict = tf.Variable([4.0, 3.0] )
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[int] = create_optimizer(5e-5 , 10 , 5 )
SCREAMING_SNAKE_CASE : List[Any] = tf.Variable([0.0, 0.0] , trainable=lowerCAmelCase__ )
def accumulate_on_replica(lowerCAmelCase__ : List[str] ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any] ):
with strategy.scope():
SCREAMING_SNAKE_CASE : Optional[Any] = strategy.experimental_local_results(lowerCAmelCase__ )
local_variables[0].assign(lowerCAmelCase__ )
local_variables[1].assign(lowerCAmelCase__ )
strategy.run(lowerCAmelCase__ , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(lowerCAmelCase__ )
def _check_local_values(lowerCAmelCase__ : Dict , lowerCAmelCase__ : int ):
SCREAMING_SNAKE_CASE : Tuple = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , lowerCAmelCase__ , tol=1e-2 )
self.assertListAlmostEqual(values[1].value() , lowerCAmelCase__ , tol=1e-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
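# --- illustrative sketch, not part of the original file ---
# The idea the tests above exercise, hand-rolled, assuming `tensorflow` is
# installed: per-step gradients are summed and applied once. The expected
# sum [-2.0, 5.0] matches the first test case above.
import tensorflow as tf

accumulated = tf.Variable([0.0, 0.0], trainable=False)
for grad in ([1.0, 2.0], [-2.0, 1.0], [-1.0, 2.0]):
    accumulated.assign_add(tf.constant(grad))  # accumulate, don't apply yet
# an optimizer step would now consume `accumulated`, after which it is reset
accumulated.assign(tf.zeros_like(accumulated))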
| 527
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self : Union[str, Any] , _lowercase : List[Any] , _lowercase : int=7 , _lowercase : Dict=3 , _lowercase : List[str]=18 , _lowercase : Any=30 , _lowercase : Tuple=4_00 , _lowercase : Union[str, Any]=True , _lowercase : Union[str, Any]=None , _lowercase : Tuple=True , ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = size if size is not None else {"height": 18, "width": 18}
SCREAMING_SNAKE_CASE__ : Dict = parent
SCREAMING_SNAKE_CASE__ : str = batch_size
SCREAMING_SNAKE_CASE__ : int = num_channels
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_size
SCREAMING_SNAKE_CASE__ : str = min_resolution
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_resolution
SCREAMING_SNAKE_CASE__ : List[str] = do_resize
SCREAMING_SNAKE_CASE__ : Dict = size
SCREAMING_SNAKE_CASE__ : List[str] = apply_ocr
def lowercase__ ( self : Dict ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowercase ( _A , unittest.TestCase ):
lowerCamelCase : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : Dict = LayoutLMvaImageProcessingTester(self )
@property
def lowercase__ ( self : Union[str, Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , '''do_resize''' ) )
self.assertTrue(hasattr(_lowercase , '''size''' ) )
self.assertTrue(hasattr(_lowercase , '''apply_ocr''' ) )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
SCREAMING_SNAKE_CASE__ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def lowercase__ ( self : str ):
pass
def lowercase__ ( self : Union[str, Any] ):
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
self.assertIsInstance(encoding.words , _lowercase )
self.assertIsInstance(encoding.boxes , _lowercase )
# Test batched
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def lowercase__ ( self : Union[str, Any] ):
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Dict = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def lowercase__ ( self : int ):
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ : int = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def lowercase__ ( self : Dict ):
# with apply_OCR = True
SCREAMING_SNAKE_CASE__ : str = LayoutLMvaImageProcessor()
from datasets import load_dataset
SCREAMING_SNAKE_CASE__ : Tuple = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
SCREAMING_SNAKE_CASE__ : str = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
SCREAMING_SNAKE_CASE__ : List[Any] = image_processing(_lowercase , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE__ : str = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
SCREAMING_SNAKE_CASE__ : Dict = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 
6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _lowercase )
self.assertListEqual(encoding.boxes , _lowercase )
# with apply_OCR = False
SCREAMING_SNAKE_CASE__ : Union[str, Any] = LayoutLMvaImageProcessor(apply_ocr=_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_processing(_lowercase , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
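# --- illustrative usage sketch, not part of the original file ---
# Typical use of the processor exercised above, assuming `transformers`,
# `torch`, PIL and pytesseract are installed; the image path is hypothetical.
from PIL import Image
from transformers import LayoutLMv3ImageProcessor

image_processor = LayoutLMv3ImageProcessor(apply_ocr=True)
image = Image.open("document.png").convert("RGB")
encoding = image_processor(image, return_tensors="pt")
# encoding.pixel_values has shape (1, 3, 224, 224); words/boxes come from OCR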
| 713
|
import mpmath # for roots of unity
import numpy as np
class lowercase :
def __init__( self : Optional[Any] , _lowercase : List[Any]=None , _lowercase : str=None ):
# Input as list
SCREAMING_SNAKE_CASE__ : int = list(poly_a or [0] )[:]
SCREAMING_SNAKE_CASE__ : int = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
SCREAMING_SNAKE_CASE__ : str = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
SCREAMING_SNAKE_CASE__ : Tuple = len(self.polyB )
# Add 0 to make lengths equal a power of 2
SCREAMING_SNAKE_CASE__ : Dict = int(
            2 ** np.ceil(np.log2(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
SCREAMING_SNAKE_CASE__ : Union[str, Any] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
SCREAMING_SNAKE_CASE__ : Any = self.__multiply()
def lowercase__ ( self : Optional[int] , _lowercase : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : Dict = [[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
# Corner case
if len(_lowercase ) <= 1:
return dft[0]
        # Iteratively fold the DFT, halving the number of columns each pass
SCREAMING_SNAKE_CASE__ : int = self.c_max_length // 2
while next_ncol > 0:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [[] for i in range(_lowercase )]
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.root**next_ncol
# First half of next step
SCREAMING_SNAKE_CASE__ : List[Any] = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(_lowercase ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
SCREAMING_SNAKE_CASE__ : List[Any] = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(_lowercase ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
SCREAMING_SNAKE_CASE__ : int = new_dft
SCREAMING_SNAKE_CASE__ : Any = next_ncol // 2
return dft[0]
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : List[Any] = self.__dft('''A''' )
SCREAMING_SNAKE_CASE__ : int = self.__dft('''B''' )
SCREAMING_SNAKE_CASE__ : int = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
SCREAMING_SNAKE_CASE__ : Dict = 2
while next_ncol <= self.c_max_length:
SCREAMING_SNAKE_CASE__ : Tuple = [[] for i in range(_lowercase )]
SCREAMING_SNAKE_CASE__ : Tuple = self.root ** (next_ncol // 2)
SCREAMING_SNAKE_CASE__ : Dict = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
SCREAMING_SNAKE_CASE__ : Union[str, Any] = new_inverse_c
next_ncol *= 2
# Unpack
SCREAMING_SNAKE_CASE__ : str = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Dict ):
SCREAMING_SNAKE_CASE__ : str = '''A = ''' + ''' + '''.join(
f"""{coef}*x^{i}""" for coef, i in enumerate(self.polyA[: self.len_A] ) )
SCREAMING_SNAKE_CASE__ : Tuple = '''B = ''' + ''' + '''.join(
f"""{coef}*x^{i}""" for coef, i in enumerate(self.polyB[: self.len_B] ) )
SCREAMING_SNAKE_CASE__ : List[Any] = '''A*B = ''' + ''' + '''.join(
f"""{coef}*x^{i}""" for coef, i in enumerate(self.product ) )
return f"""{a}\n{b}\n{c}"""
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
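# --- illustrative sketch, not part of the original file ---
# The same technique with numpy's FFT, assuming `numpy` is installed: pad the
# coefficient vectors to a power of two, multiply pointwise in the frequency
# domain, and invert. The polynomials below are hypothetical.
import numpy as np

poly_a = [1, 2]  # 1 + 2x
poly_b = [3, 4]  # 3 + 4x
n = 1 << (len(poly_a) + len(poly_b) - 2).bit_length()
product = np.fft.ifft(np.fft.fft(poly_a, n) * np.fft.fft(poly_b, n)).real.round(8)
# product[:3] -> [3., 10., 8.], i.e. 3 + 10x + 8x^2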
| 250
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE_ = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
SCREAMING_SNAKE_CASE_ = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
SCREAMING_SNAKE_CASE_ = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
SCREAMING_SNAKE_CASE_ = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
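# --- illustrative sketch, not part of the original file ---
# The lazy-module pattern in miniature, with hypothetical names: attribute
# lookups are resolved on first access, so heavy backends such as torch or
# tensorflow are imported only when one of their classes is actually used.
import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module("." + submodule, self.__name__)
                return getattr(module, attr)
        raise AttributeError(attr)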
| 34
|
"""simple docstring"""
import requests
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = {"""Content-Type""": """application/json"""}
UpperCAmelCase__ : Optional[Any] = requests.post(__UpperCamelCase , json={"""text""": message_body} , headers=__UpperCamelCase )
if response.status_code != 200:
UpperCAmelCase__ : Any = (
"""Request to slack returned an error """
F"{response.status_code}, the response is:\n{response.text}"
)
raise ValueError(__UpperCamelCase )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 65
| 0
|
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
__a : List[str] = 6378137.0
__a : int = 6356752.314245
__a : Tuple = 6378137
def _SCREAMING_SNAKE_CASE ( __lowercase : float , __lowercase : float , __lowercase : float , __lowercase : float ) -> float:
"""simple docstring"""
__A = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
__A = atan((1 - flattening) * tan(radians(__lowercase ) ) )
__A = atan((1 - flattening) * tan(radians(__lowercase ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
__A = haversine_distance(__lowercase , __lowercase , __lowercase , __lowercase ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
    __A = (b_lat1 + b_lat2) / 2
    __A = (b_lat2 - b_lat1) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
__A = (sin(__lowercase ) ** 2) * (cos(__lowercase ) ** 2)
__A = cos(sigma / 2 ) ** 2
__A = (sigma - sin(__lowercase )) * (x_numerator / x_demonimator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
__A = (cos(__lowercase ) ** 2) * (sin(__lowercase ) ** 2)
__A = sin(sigma / 2 ) ** 2
__A = (sigma + sin(__lowercase )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
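    # --- illustrative usage sketch, not part of the original file ---
    # Hypothetical call: San Francisco (37.774856, -122.424227) to Yosemite
    # (37.864742, -119.537521). The result is in metres and should come out
    # at roughly 254 km, close to the plain haversine value at this range.
    print(_SCREAMING_SNAKE_CASE(37.774856, -122.424227, 37.864742, -119.537521))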
| 706
|
__a : List[Any] = 8.3_1_4_4_5_9_8
def _SCREAMING_SNAKE_CASE ( __lowercase : float , __lowercase : float ) -> float:
"""simple docstring"""
if temperature < 0:
raise Exception("""Temperature cannot be less than 0 K""" )
if molar_mass <= 0:
raise Exception("""Molar mass cannot be less than or equal to 0 kg/mol""" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
__a : int = 300
    __a : float = 0.028  # molar mass of nitrogen (N2) in kg/mol, matching the units checked above
__a : int = rms_speed_of_molecule(temperature, molar_mass)
print(f"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
| 199
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A_ : List[Any] = logging.get_logger(__name__)
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : Optional[Any]=False , __magic_name__ : Dict=False ) -> Optional[int]:
'''simple docstring'''
snake_case__ : int = """backbone.""" if is_semantic else """"""
snake_case__ : List[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(f"{prefix}cls_token", """beit.embeddings.cls_token"""),
(f"{prefix}patch_embed.proj.weight", """beit.embeddings.patch_embeddings.projection.weight"""),
(f"{prefix}patch_embed.proj.bias", """beit.embeddings.patch_embeddings.projection.bias"""),
(f"{prefix}pos_embed", """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def UpperCamelCase__ ( __magic_name__ : int , __magic_name__ : Dict , __magic_name__ : Tuple=False , __magic_name__ : Any=False ) -> Dict:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
snake_case__ : Tuple = """backbone.""" if is_semantic else """"""
# queries, keys and values
snake_case__ : int = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight" )
snake_case__ : List[str] = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias" )
snake_case__ : Any = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias" )
snake_case__ : Optional[int] = in_proj_weight[
: config.hidden_size, :
]
snake_case__ : Optional[int] = q_bias
snake_case__ : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case__ : int = in_proj_weight[
-config.hidden_size :, :
]
snake_case__ : List[str] = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
snake_case__ : Tuple = state_dict.pop(f"{prefix}blocks.{i}.gamma_1" )
snake_case__ : str = state_dict.pop(f"{prefix}blocks.{i}.gamma_2" )
snake_case__ : int = gamma_a
snake_case__ : Dict = gamma_a
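# --- illustrative sketch, not part of the original file ---
# What the split above does, on a toy tensor with hypothetical sizes: the
# original checkpoint stores one fused (3*hidden, hidden) qkv projection,
# while the converted model expects separate query, key and value matrices.
import torch

hidden = 4
in_proj_weight = torch.randn(3 * hidden, hidden)
q_w = in_proj_weight[:hidden, :]
k_w = in_proj_weight[hidden : 2 * hidden, :]
v_w = in_proj_weight[-hidden:, :]
assert q_w.shape == k_w.shape == v_w.shape == (hidden, hidden)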
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : Any ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Optional[int] = dct.pop(__magic_name__ )
snake_case__ : str = val
def UpperCamelCase__ ( ) -> Dict:
'''simple docstring'''
snake_case__ : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case__ : Dict = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
return im
@torch.no_grad()
def UpperCamelCase__ ( __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any=False ) -> Any:
'''simple docstring'''
snake_case__ : Any = False if """rvlcdip""" in checkpoint_url else True
snake_case__ : Optional[Any] = BeitConfig(use_absolute_position_embeddings=__magic_name__ , use_mask_token=__magic_name__ )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
snake_case__ : Optional[Any] = 10_24
snake_case__ : Optional[Any] = 40_96
snake_case__ : int = 24
snake_case__ : Tuple = 16
# labels
if "rvlcdip" in checkpoint_url:
snake_case__ : Union[str, Any] = 16
snake_case__ : Dict = """huggingface/label-files"""
snake_case__ : str = """rvlcdip-id2label.json"""
snake_case__ : Tuple = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="""dataset""" ) , """r""" ) )
snake_case__ : List[str] = {int(__magic_name__ ): v for k, v in idalabel.items()}
snake_case__ : int = idalabel
snake_case__ : List[str] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
snake_case__ : List[str] = torch.hub.load_state_dict_from_url(__magic_name__ , map_location="""cpu""" )["""model"""]
snake_case__ : Tuple = create_rename_keys(__magic_name__ , has_lm_head=__magic_name__ )
for src, dest in rename_keys:
rename_key(__magic_name__ , __magic_name__ , __magic_name__ )
read_in_q_k_v(__magic_name__ , __magic_name__ , has_lm_head=__magic_name__ )
# load HuggingFace model
snake_case__ : str = BeitForMaskedImageModeling(__magic_name__ ) if has_lm_head else BeitForImageClassification(__magic_name__ )
model.eval()
model.load_state_dict(__magic_name__ )
# Check outputs on an image
snake_case__ : Tuple = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__magic_name__ )
snake_case__ : str = prepare_img()
snake_case__ : List[Any] = image_processor(images=__magic_name__ , return_tensors="""pt""" )
snake_case__ : Dict = encoding["""pixel_values"""]
snake_case__ : Union[str, Any] = model(__magic_name__ )
snake_case__ : Optional[int] = outputs.logits
# verify logits
snake_case__ : str = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 1_96, 81_92]
assert logits.shape == torch.Size(__magic_name__ ), "Shape of logits not as expected"
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(__magic_name__ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__magic_name__ )
if push_to_hub:
if has_lm_head:
snake_case__ : Optional[int] = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
else:
snake_case__ : Dict = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
image_processor.push_to_hub(
repo_path_or_name=Path(__magic_name__ , __magic_name__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=__magic_name__ , )
model.push_to_hub(
repo_path_or_name=Path(__magic_name__ , __magic_name__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=__magic_name__ , )
if __name__ == "__main__":
A_ : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
A_ : Optional[Any] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 38
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A_ : Optional[int] = logging.get_logger(__name__)
def UpperCamelCase__ ( __magic_name__ : Optional[Any] , __magic_name__ : str=False ) -> Tuple:
'''simple docstring'''
snake_case__ : int = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case__ : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def UpperCamelCase__ ( __magic_name__ : Tuple , __magic_name__ : int , __magic_name__ : Tuple=False ) -> str:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
snake_case__ : int = """"""
else:
snake_case__ : Dict = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case__ : int = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
snake_case__ : Union[str, Any] = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
snake_case__ : Optional[int] = in_proj_weight[
: config.hidden_size, :
]
snake_case__ : Optional[Any] = in_proj_bias[: config.hidden_size]
snake_case__ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case__ : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case__ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
snake_case__ : Optional[int] = in_proj_bias[-config.hidden_size :]
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case__ : str = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(__magic_name__ , __magic_name__ )
def UpperCamelCase__ ( __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : str ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : List[str] = dct.pop(__magic_name__ )
snake_case__ : Dict = val
def UpperCamelCase__ ( ) -> str:
'''simple docstring'''
snake_case__ : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case__ : Optional[int] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
return im
@torch.no_grad()
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : int=False ) -> Optional[int]:
'''simple docstring'''
snake_case__ : int = BitConfig(
global_padding="""same""" , layer_type="""bottleneck""" , depths=(3, 4, 9) , out_features=["""stage3"""] , embedding_dynamic_padding=__magic_name__ , )
snake_case__ : Optional[int] = ViTHybridConfig(backbone_config=__magic_name__ , image_size=3_84 , num_labels=10_00 )
snake_case__ : Union[str, Any] = False
# load original model from timm
snake_case__ : List[Any] = timm.create_model(__magic_name__ , pretrained=__magic_name__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case__ : Optional[int] = timm_model.state_dict()
if base_model:
remove_classification_head_(__magic_name__ )
snake_case__ : int = create_rename_keys(__magic_name__ , __magic_name__ )
for src, dest in rename_keys:
rename_key(__magic_name__ , __magic_name__ , __magic_name__ )
read_in_q_k_v(__magic_name__ , __magic_name__ , __magic_name__ )
snake_case__ : str = """huggingface/label-files"""
snake_case__ : Union[str, Any] = """imagenet-1k-id2label.json"""
snake_case__ : Dict = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="""dataset""" ) , """r""" ) )
snake_case__ : List[Any] = {int(__magic_name__ ): v for k, v in idalabel.items()}
snake_case__ : int = idalabel
snake_case__ : str = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
snake_case__ : str = ViTHybridModel(__magic_name__ ).eval()
else:
snake_case__ : Union[str, Any] = ViTHybridForImageClassification(__magic_name__ ).eval()
model.load_state_dict(__magic_name__ )
# create image processor
snake_case__ : Optional[Any] = create_transform(**resolve_data_config({} , model=__magic_name__ ) )
snake_case__ : Union[str, Any] = transform.transforms
snake_case__ : Tuple = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
snake_case__ : Any = ViTHybridImageProcessor(
do_resize=__magic_name__ , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__magic_name__ , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=__magic_name__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
snake_case__ : Any = prepare_img()
snake_case__ : int = transform(__magic_name__ ).unsqueeze(0 )
snake_case__ : List[str] = processor(__magic_name__ , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(__magic_name__ , __magic_name__ )
# verify logits
with torch.no_grad():
snake_case__ : Optional[Any] = model(__magic_name__ )
snake_case__ : Union[str, Any] = outputs.logits
print("""Predicted class:""" , logits.argmax(-1 ).item() )
if base_model:
snake_case__ : Dict = timm_model.forward_features(__magic_name__ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__magic_name__ , outputs.pooler_output , atol=1E-3 )
else:
snake_case__ : int = timm_model(__magic_name__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__magic_name__ , outputs.logits , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
print(f"Saving model {vit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__magic_name__ )
print(f"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(__magic_name__ )
if push_to_hub:
print(f"Pushing model and processor to the hub {vit_name}" )
model.push_to_hub(f"ybelkada/{vit_name}" )
processor.push_to_hub(f"ybelkada/{vit_name}" )
if __name__ == "__main__":
A_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
A_ : Union[str, Any] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 38
| 1
|
from typing import Any


def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")


if __name__ == "__main__":
    from doctest import testmod

    testmod()
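
    # Minimal usage sketch on the classic healthy/fever HMM (toy data):
    observations = ["normal", "cold", "dizzy"]
    states = ["Healthy", "Fever"]
    start_p = {"Healthy": 0.6, "Fever": 0.4}
    trans_p = {
        "Healthy": {"Healthy": 0.7, "Fever": 0.3},
        "Fever": {"Healthy": 0.4, "Fever": 0.6},
    }
    emit_p = {
        "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    print(viterbi(observations, states, start_p, trans_p, emit_p))
    # -> ['Healthy', 'Healthy', 'Fever'] for this well-known example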
| 618
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures


logger = logging.get_logger(__name__)


@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
| 618
| 1
|
def is_automorphic_number(number: int) -> bool:
    """
    Check whether ``number`` is automorphic, i.e. whether its square ends in the
    same digits as the number itself (e.g. 5 -> 25, 76 -> 5776).
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
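
    # Quick sanity checks:
    assert is_automorphic_number(76)  # 76**2 == 5776, which ends in 76
    assert not is_automorphic_number(7)  # 7**2 == 49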
| 22
|
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-bit string of bits to little endian (reverse the byte order)."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Convert a non-negative integer to its little-endian hex representation."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for j in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * j : 2 * j + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Pad the message to a multiple of 512 bits and append the original length."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the padded bit string into 512-bit blocks of 16 little-endian 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Perform bitwise NOT on a 32-bit integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Add two numbers modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate the bits of a 32-bit integer left by ``shift`` places."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the 128-bit MD5 digest of ``message`` as a bytes hex string."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
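
    # Cross-check against the standard library (md5_me returns the hex digest
    # as bytes, so the hashlib hexdigest is encoded before comparing):
    import hashlib

    payload = b"The quick brown fox jumps over the lazy dog"
    assert md5_me(payload) == hashlib.md5(payload).hexdigest().encode("utf-8")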
| 22
| 1
|
import argparse
import shutil
from pathlib import Path

from tqdm import tqdm

from transformers import AutoTokenizer


def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
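
    # Example invocation (paths illustrative):
    #   python pack_dataset.py --tok_name facebook/bart-large-cnn \
    #       --max_seq_len 1024 --data_dir ./cnn_dm --save_path ./cnn_dm_packed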
| 591
|
import re
import string

import numpy as np

import datasets


_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: List of predicted texts.
    references: List of reference texts.
    regexes_to_ignore: List, defaults to None. Regex expressions of characters to
        ignore when calculating the exact matches. Note: these regexes are removed
        from the input data before the changes based on the options below (e.g. ignore_case,
        ignore_punctuation, ignore_numbers) are applied.
    ignore_case: Boolean, defaults to False. If true, turns everything
        to lowercase so that capitalization differences are ignored.
    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
        comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    25.0
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    50.0
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    75.0
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
    >>> print(round(results["exact_match"], 1))
    100.0
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It's like comparing oranges and apples."]
    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It's like comparing apples and oranges."]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    33.3
"""

_CITATION = """
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 591
| 1
|
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
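
    # Minimal usage sketch (toy data): greedily keep the highest-value items
    # while the weight budget allows.
    menu = build_menu(["burger", "pizza", "cola"], [80, 100, 45], [40, 60, 30])
    chosen, total_value = greedy(menu, 100, Things.get_value)
    print(chosen, total_value)  # pizza and burger fit the budget; cola does not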
| 16
|
from collections import deque


def tarjan(g):
    """
    Tarjan's algorithm for finding the strongly connected components of a
    directed graph given as an adjacency list.
    """
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n, edges):
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
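
    # Note on the expected ordering above: Tarjan emits components in reverse
    # topological order of the condensation graph, so the sink SCC [5] comes
    # first and the source SCC [3, 2, 1, 0] comes last.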
| 246
| 0
|
from typing import List, Optional, Union

import numpy as np
import tensorflow as tf

from .utils import logging


logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Deal with dynamic shape in tensorflow cleanly."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    """Stable wrapper around `tf.nn.softmax` (the small constant avoids XLA issues)."""
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # A simplified functional layernorm, duplicating the behavior of
    # PyTorch nn.functional.layer_norm when porting models.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF.
    # If end_dim or start_dim is negative, count them from the end.
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)


def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    """Saves attributes (data) of the specified name into the HDF5 group."""
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    """Loads attributes of the specified name from the HDF5 group."""
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    """Expands 1-dimensional `Tensor`s into 2-dimensional `Tensor`s."""

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
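

# Minimal shape_list sketch: static dimensions come back as Python ints and
# unknown dimensions as scalar tensors, so downstream reshapes work the same
# way in eager and graph mode, e.g.
#   shape_list(tf.zeros((2, 3)))  # -> [2, 3]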
| 5
|
from __future__ import annotations

import time

import numpy as np

test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]


class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum the allocated resources for each resource in the claim vector."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still available, per resource in the claim vector."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Per-process needs: maximum_claim_table[i] - allocated_resources_table[i]."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each need list to its original process index."""
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        """Simulate the Banker's algorithm on the stored tables."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Properly align the display of the algorithm's data."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
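
    # Run the simulation on the module-level test data above (any truthy
    # keyword argument triggers the pretty-printed tables):
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)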
| 5
| 1
|
import argparse
import os

import transformers

from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging


logging.set_verbosity_info()

logger = logging.get_logger(__name__)


TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}


def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
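
    # Example invocation (illustrative):
    #   python convert_slow_tokenizers_checkpoints_to_fast.py --tokenizer_name BertTokenizer \
    #       --checkpoint_name bert-base-uncased --dump_path ./fast_tokenizers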
| 241
|
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}

if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
| 241
| 1
|
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def __a ( _lowercase , _lowercase , _lowercase=None ):
"""simple docstring"""
assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
lowerCamelCase__ : Any = nn.Parameter(_lowercase )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
lowerCamelCase__ : Optional[int] = nn.Parameter(_lowercase )
def __a ( _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
lowerCamelCase__ : int = np.asarray(weights[0] )
lowerCamelCase__ : List[Any] = np.asarray(weights[1] )
lowerCamelCase__ : Union[str, Any] = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(_lowercase ).transpose(1 , 2 ).contiguous().view(-1 , _lowercase ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(_lowercase ).transpose(1 , 2 ).contiguous().view(-1 , _lowercase ) , )
set_param(
torch_layer.output.dense , torch.tensor(_lowercase ).view(-1 , _lowercase ).contiguous().transpose(0 , 1 ) , )
def __a ( _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] = np.asarray(weights[0] )
lowerCamelCase__ : str = np.asarray(weights[1] )
lowerCamelCase__ : str = np.asarray(weights[2] )
lowerCamelCase__ : Optional[Any] = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(_lowercase ).transpose(1 , 2 ).contiguous().view(-1 , _lowercase ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(_lowercase ).transpose(1 , 2 ).contiguous().view(-1 , _lowercase ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(_lowercase ).transpose(1 , 2 ).contiguous().view(-1 , _lowercase ) , )
set_param(
torch_layer.output.dense , torch.tensor(_lowercase ).view(-1 , _lowercase ).contiguous().transpose(0 , 1 ) , )
def __a ( _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] = weights[0][0][0]
lowerCamelCase__ : List[Any] = np.asarray(layer_norm_a[0] )
lowerCamelCase__ : Any = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(_lowercase ) , torch.tensor(_lowercase ) , )
# lsh weights + output
lowerCamelCase__ : Any = weights[0][1]
if len(_lowercase ) < 4:
set_layer_weights_in_torch_lsh(_lowercase , torch_block.attention , _lowercase )
else:
set_layer_weights_in_torch_local(_lowercase , torch_block.attention , _lowercase )
# intermediate weighs
lowerCamelCase__ : Optional[int] = weights[2][0][1][2]
# Chunked Feed Forward
if len(_lowercase ) == 4:
lowerCamelCase__ : Tuple = intermediate_weights[2]
# layernorm 2
lowerCamelCase__ : Optional[Any] = np.asarray(intermediate_weights[0][0] )
lowerCamelCase__ : Dict = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(_lowercase ) , torch.tensor(_lowercase ) , )
# intermediate dense
lowerCamelCase__ : List[Any] = np.asarray(intermediate_weights[1][0] )
lowerCamelCase__ : Dict = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(_lowercase ).transpose(0 , 1 ).contiguous() , torch.tensor(_lowercase ) , )
# intermediate out
lowerCamelCase__ : Union[str, Any] = np.asarray(intermediate_weights[4][0] )
lowerCamelCase__ : Optional[Any] = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(_lowercase ).transpose(0 , 1 ).contiguous() , torch.tensor(_lowercase ) , )
def __a ( _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
lowerCamelCase__ : int = torch_model.reformer
# word embeds
lowerCamelCase__ : Union[str, Any] = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(_lowercase ) , )
if isinstance(weights[3] , _lowercase ):
lowerCamelCase__ : List[Any] = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
lowerCamelCase__ : Tuple = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f"""{position_embeddings[emb_idx]} emb does not match"""
lowerCamelCase__ : List[Any] = nn.Parameter(torch.tensor(_lowercase ) )
lowerCamelCase__ : Optional[int] = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
_lowercase ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
lowerCamelCase__ : Optional[int] = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(_lowercase , _lowercase , _lowercase )
# output layer norm
lowerCamelCase__ : Optional[int] = np.asarray(weights[7][0] )
lowerCamelCase__ : List[str] = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(_lowercase ) , torch.tensor(_lowercase ) , )
# output embeddings
lowerCamelCase__ : Any = np.asarray(weights[9][0] )
lowerCamelCase__ : Tuple = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(_lowercase ).transpose(0 , 1 ).contiguous() , torch.tensor(_lowercase ) , )
def __a ( _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] = ReformerConfig.from_json_file(_lowercase )
print(f"""Building PyTorch model from configuration: {config}""" )
lowerCamelCase__ : List[Any] = ReformerModelWithLMHead(_lowercase )
with open(_lowercase , '''rb''' ) as f:
lowerCamelCase__ : Optional[Any] = pickle.load(_lowercase )['''weights''']
set_model_weights_in_torch(_lowercase , _lowercase , config.hidden_size )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , _lowercase )
if __name__ == "__main__":
UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCAmelCase : List[str] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
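
    # Example invocation (illustrative paths):
    #   python convert_reformer_trax_checkpoint_to_pytorch.py \
    #       --trax_model_pkl_path ./model.pkl --config_file ./config.json \
    #       --pytorch_dump_path ./pytorch_model.bin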
| 718
|
"""simple docstring"""
def __a ( _lowercase ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = ''''''
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def __a ( _lowercase ):
"""simple docstring"""
lowerCamelCase__ : int = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
lowerCamelCase__ : Tuple = remove_duplicates(key.upper() )
lowerCamelCase__ : Optional[Any] = len(_lowercase )
# First fill cipher with key characters
lowerCamelCase__ : int = {alphabet[i]: char for i, char in enumerate(_lowercase )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(_lowercase ) , 26 ):
lowerCamelCase__ : Optional[int] = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
lowerCamelCase__ : Optional[Any] = alphabet[i - offset]
lowerCamelCase__ : Union[str, Any] = char
return cipher_alphabet
def __a ( _lowercase , _lowercase ):
"""simple docstring"""
return "".join(cipher_map.get(_lowercase , _lowercase ) for ch in message.upper() )
def __a ( _lowercase , _lowercase ):
"""simple docstring"""
lowerCamelCase__ : Tuple = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(_lowercase , _lowercase ) for ch in message.upper() )
def __a ( ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] = input('''Enter message to encode or decode: ''' ).strip()
lowerCamelCase__ : List[str] = input('''Enter keyword: ''' ).strip()
lowerCamelCase__ : int = input('''Encipher or decipher? E/D:''' ).strip()[0].lower()
try:
lowerCamelCase__ : int = {'''e''': encipher, '''d''': decipher}[option]
except KeyError:
raise KeyError('''invalid input option''' )
lowerCamelCase__ : Optional[Any] = create_cipher_map(_lowercase )
print(func(_lowercase , _lowercase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
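
    # Non-interactive sketch of the same flow (the keyword is arbitrary):
    demo_map = create_cipher_map("College")
    secret = encipher("Hello World", demo_map)
    assert decipher(secret, demo_map) == "HELLO WORLD"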
| 121
| 0
|
import argparse
import json

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification


def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            prefix = f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
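
    # Example invocation (illustrative output path; script name assumed):
    #   python convert_swin_timm_to_pytorch.py --swin_name swin_tiny_patch4_window7_224 \
    #       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224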
| 176
|
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
| 176
| 1
|
import numpy as np
from nltk.translate import meteor_score

import datasets
from datasets.config import importlib_metadata, version


NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
    from nltk import word_tokenize


_CITATION = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"

_DESCRIPTION = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"

_KWARGS_DESCRIPTION = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 694
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = ['pixel_values']
def __init__( self , lowerCamelCase = True , lowerCamelCase = 32 , lowerCamelCase=PILImageResampling.BILINEAR , lowerCamelCase = True , **lowerCamelCase , ) -> None:
"""simple docstring"""
snake_case__ : int = do_resize
snake_case__ : Dict = do_rescale
snake_case__ : Any = size_divisor
snake_case__ : str = resample
super().__init__(**lowerCamelCase )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ) -> np.ndarray:
"""simple docstring"""
snake_case__ ,snake_case__ : Any = get_image_size(lowerCamelCase )
# Rounds the height and width down to the closest multiple of size_divisor
snake_case__ : Any = height // size_divisor * size_divisor
snake_case__ : Union[str, Any] = width // size_divisor * size_divisor
snake_case__ : Tuple = resize(lowerCamelCase , (new_h, new_w) , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
return image
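# Illustration of the rounding above (hypothetical numbers, not part of the
# original file): with size_divisor=32, an image of height=521 and width=333
# is resized to (512, 320), each dimension floored to a multiple of 32.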
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ) -> np.ndarray:
"""simple docstring"""
return rescale(image=lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase=None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ) -> BatchFeature:
"""simple docstring"""
snake_case__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
snake_case__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
snake_case__ : Any = size_divisor if size_divisor is not None else self.size_divisor
snake_case__ : Dict = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
snake_case__ : Optional[Any] = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
snake_case__ : Optional[int] = [to_numpy_array(lowerCamelCase ) for img in images]
if do_resize:
snake_case__ : Union[str, Any] = [self.resize(lowerCamelCase , size_divisor=lowerCamelCase , resample=lowerCamelCase ) for image in images]
if do_rescale:
snake_case__ : str = [self.rescale(lowerCamelCase , scale=1 / 255 ) for image in images]
snake_case__ : Tuple = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
snake_case__ : str = {'''pixel_values''': images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
| 694
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_UpperCAmelCase = {
"""configuration_bridgetower""": [
"""BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BridgeTowerConfig""",
"""BridgeTowerTextConfig""",
"""BridgeTowerVisionConfig""",
],
"""processing_bridgetower""": ["""BridgeTowerProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""BridgeTowerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BridgeTowerForContrastiveLearning""",
"""BridgeTowerForImageAndTextRetrieval""",
"""BridgeTowerForMaskedLM""",
"""BridgeTowerModel""",
"""BridgeTowerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 409
|
"""simple docstring"""
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(""">=""", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
_UpperCAmelCase = get_logger(__name__)
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase , lowercase=0 ):
os.makedirs(lowercase , exist_ok=lowercase )
with FSDP.state_dict_type(
lowercase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
SCREAMING_SNAKE_CASE_: List[Any] =model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
SCREAMING_SNAKE_CASE_: str =f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin'''
SCREAMING_SNAKE_CASE_: List[Any] =os.path.join(lowercase , lowercase )
if accelerator.process_index == 0:
logger.info(f'''Saving model to {output_model_file}''' )
torch.save(lowercase , lowercase )
logger.info(f'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
SCREAMING_SNAKE_CASE_: Dict =(
f'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
SCREAMING_SNAKE_CASE_: int =os.path.join(lowercase , lowercase )
logger.info(f'''Saving model to {output_model_file}''' )
torch.save(lowercase , lowercase )
logger.info(f'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
SCREAMING_SNAKE_CASE_: Dict =os.path.join(lowercase , f'''{MODEL_NAME}_{model_index}''' )
os.makedirs(lowercase , exist_ok=lowercase )
logger.info(f'''Saving model to {ckpt_dir}''' )
SCREAMING_SNAKE_CASE_: Dict ={"""model""": state_dict}
dist_cp.save_state_dict(
state_dict=lowercase , storage_writer=dist_cp.FileSystemWriter(lowercase ) , planner=DefaultSavePlanner() , )
logger.info(f'''Model saved to {ckpt_dir}''' )
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase , lowercase=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
lowercase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(lowercase ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"""Set the `sync_module_states` flag to `True` so that model states are synced across processes when """
"""initializing FSDP object""" )
return
SCREAMING_SNAKE_CASE_: List[Any] =f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin'''
SCREAMING_SNAKE_CASE_: int =os.path.join(lowercase , lowercase )
logger.info(f'''Loading model from {input_model_file}''' )
SCREAMING_SNAKE_CASE_: List[Any] =torch.load(lowercase )
logger.info(f'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
SCREAMING_SNAKE_CASE_: Dict =(
f'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
SCREAMING_SNAKE_CASE_: Dict =os.path.join(lowercase , lowercase )
logger.info(f'''Loading model from {input_model_file}''' )
SCREAMING_SNAKE_CASE_: int =torch.load(lowercase )
logger.info(f'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
SCREAMING_SNAKE_CASE_: Optional[Any] =(
os.path.join(lowercase , f'''{MODEL_NAME}_{model_index}''' )
if f'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(f'''Loading model from {ckpt_dir}''' )
SCREAMING_SNAKE_CASE_: List[Any] ={"""model""": model.state_dict()}
dist_cp.load_state_dict(
state_dict=lowercase , storage_reader=dist_cp.FileSystemReader(lowercase ) , planner=DefaultLoadPlanner() , )
SCREAMING_SNAKE_CASE_: Optional[Any] =state_dict["""model"""]
logger.info(f'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(lowercase )
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=0 ):
os.makedirs(lowercase , exist_ok=lowercase )
with FSDP.state_dict_type(
lowercase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
SCREAMING_SNAKE_CASE_: Optional[int] =FSDP.optim_state_dict(lowercase , lowercase )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
SCREAMING_SNAKE_CASE_: Optional[int] =(
f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
SCREAMING_SNAKE_CASE_: Tuple =os.path.join(lowercase , lowercase )
logger.info(f'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(lowercase , lowercase )
logger.info(f'''Optimizer state saved in {output_optimizer_file}''' )
else:
SCREAMING_SNAKE_CASE_: Dict =os.path.join(lowercase , f'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(lowercase , exist_ok=lowercase )
logger.info(f'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={"""optimizer""": optim_state} , storage_writer=dist_cp.FileSystemWriter(lowercase ) , planner=DefaultSavePlanner() , )
logger.info(f'''Optimizer state saved in {ckpt_dir}''' )
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
lowercase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
SCREAMING_SNAKE_CASE_: int =None
# below check should work but currently it isn't working (mostly a PyTorch issue);
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
SCREAMING_SNAKE_CASE_: Tuple =(
f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
SCREAMING_SNAKE_CASE_: Union[str, Any] =os.path.join(lowercase , lowercase )
logger.info(f'''Loading Optimizer state from {input_optimizer_file}''' )
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.load(lowercase )
logger.info(f'''Optimizer state loaded from {input_optimizer_file}''' )
else:
SCREAMING_SNAKE_CASE_: str =(
os.path.join(lowercase , f'''{OPTIMIZER_NAME}_{optimizer_index}''' )
if f'''{OPTIMIZER_NAME}''' not in input_dir
else input_dir
)
logger.info(f'''Loading Optimizer from {ckpt_dir}''' )
SCREAMING_SNAKE_CASE_: Any =load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="""optimizer""" , storage_reader=dist_cp.FileSystemReader(lowercase ) , )
SCREAMING_SNAKE_CASE_: Any =optim_state["""optimizer"""]
logger.info(f'''Optimizer loaded from {ckpt_dir}''' )
SCREAMING_SNAKE_CASE_: Tuple =FSDP.optim_state_dict_to_load(lowercase , lowercase , lowercase )
optimizer.load_state_dict(lowercase )
| 409
| 1
|
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def snake_case ( snake_case__ :int) -> Union[str, Any]:
random.seed(snake_case__)
np.random.seed(snake_case__)
torch.manual_seed(snake_case__)
torch.cuda.manual_seed_all(snake_case__)
# ^^ safe to call this function even if cuda is not available
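# Minimal usage sketch (assumed call site, not part of the original module): a
# single call such as snake_case(42) at program start seeds python, numpy and
# torch (CPU and, when present, CUDA) so runs are reproducible.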
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ = 0.9999 , lowerCAmelCase_ = 0.0 , lowerCAmelCase_ = 0 , lowerCAmelCase_ = False , lowerCAmelCase_ = 1.0 , lowerCAmelCase_ = 2 / 3 , lowerCAmelCase_ = None , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> Optional[Any]:
if isinstance(lowerCAmelCase_ , torch.nn.Module ):
_A = (
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage`""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ , )
_A = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_A = True
if kwargs.get("""max_value""" , lowerCAmelCase_ ) is not None:
_A = """The `max_value` argument is deprecated. Please use `decay` instead."""
deprecate("""max_value""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ )
_A = kwargs["""max_value"""]
if kwargs.get("""min_value""" , lowerCAmelCase_ ) is not None:
_A = """The `min_value` argument is deprecated. Please use `min_decay` instead."""
deprecate("""min_value""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ )
_A = kwargs["""min_value"""]
_A = list(lowerCAmelCase_ )
_A = [p.clone().detach() for p in parameters]
if kwargs.get("""device""" , lowerCAmelCase_ ) is not None:
_A = """The `device` argument is deprecated. Please use `to` instead."""
deprecate("""device""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ )
self.to(device=kwargs["""device"""] )
_A = None
_A = decay
_A = min_decay
_A = update_after_step
_A = use_ema_warmup
_A = inv_gamma
_A = power
_A = 0
_A = None # set in `step()`
_A = model_cls
_A = model_config
@classmethod
def UpperCAmelCase ( cls , lowerCAmelCase_ , lowerCAmelCase_ ) -> "EMAModel":
_A , _A = model_cls.load_config(lowerCAmelCase_ , return_unused_kwargs=lowerCAmelCase_ )
_A = model_cls.from_pretrained(lowerCAmelCase_ )
_A = cls(model.parameters() , model_cls=lowerCAmelCase_ , model_config=model.config )
ema_model.load_state_dict(lowerCAmelCase_ )
return ema_model
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]:
if self.model_cls is None:
raise ValueError("""`save_pretrained` can only be used if `model_cls` was defined at __init__.""" )
if self.model_config is None:
raise ValueError("""`save_pretrained` can only be used if `model_config` was defined at __init__.""" )
_A = self.model_cls.from_config(self.model_config )
_A = self.state_dict()
state_dict.pop("""shadow_params""" , lowerCAmelCase_ )
model.register_to_config(**lowerCAmelCase_ )
self.copy_to(model.parameters() )
model.save_pretrained(lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> float:
_A = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_A = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_A = (1 + step) / (10 + step)
_A = min(lowerCAmelCase_ , self.decay )
# make sure decay is not smaller than min_decay
_A = max(lowerCAmelCase_ , self.min_decay )
return cur_decay_value
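# Worked example for the warmup branch above (illustrative values only): with
# inv_gamma=1.0 and power=2/3, step=10 gives 1 - 11 ** (-2 / 3) ≈ 0.798, which
# is then clamped into [min_decay, decay].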
@torch.no_grad()
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]:
if isinstance(lowerCAmelCase_ , torch.nn.Module ):
_A = (
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage.step`""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ , )
_A = parameters.parameters()
_A = list(lowerCAmelCase_ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_A = self.get_decay(self.optimization_step )
_A = decay
_A = 1 - decay
_A = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , lowerCAmelCase_ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
_A = deepspeed.zero.GatheredParameters(lowerCAmelCase_ , modifier_rank=lowerCAmelCase_ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(lowerCAmelCase_ )
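# Note on the in-place update above: s.sub_((1 - d) * (s - p)) is algebraically
# the standard EMA rule s = d * s + (1 - d) * p, just written to avoid an
# intermediate tensor.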
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> None:
_A = list(lowerCAmelCase_ )
for s_param, param in zip(self.shadow_params , lowerCAmelCase_ ):
param.data.copy_(s_param.to(param.device ).data )
def UpperCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None ) -> None:
_A = [
p.to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ) if p.is_floating_point() else p.to(device=lowerCAmelCase_ )
for p in self.shadow_params
]
def UpperCAmelCase ( self ) -> dict:
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> None:
_A = [param.detach().cpu().clone() for param in parameters]
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> None:
if self.temp_stored_params is None:
raise RuntimeError("""This ExponentialMovingAverage has no `store()`ed weights """ """to `restore()`""" )
for c_param, param in zip(self.temp_stored_params , lowerCAmelCase_ ):
param.data.copy_(c_param.data )
# Better memory-wise.
_A = None
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> None:
_A = copy.deepcopy(lowerCAmelCase_ )
_A = state_dict.get("""decay""" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("""Decay must be between 0 and 1""" )
_A = state_dict.get("""min_decay""" , self.min_decay )
if not isinstance(self.min_decay , lowerCAmelCase_ ):
raise ValueError("""Invalid min_decay""" )
_A = state_dict.get("""optimization_step""" , self.optimization_step )
if not isinstance(self.optimization_step , lowerCAmelCase_ ):
raise ValueError("""Invalid optimization_step""" )
_A = state_dict.get("""update_after_step""" , self.update_after_step )
if not isinstance(self.update_after_step , lowerCAmelCase_ ):
raise ValueError("""Invalid update_after_step""" )
_A = state_dict.get("""use_ema_warmup""" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , lowerCAmelCase_ ):
raise ValueError("""Invalid use_ema_warmup""" )
_A = state_dict.get("""inv_gamma""" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("""Invalid inv_gamma""" )
_A = state_dict.get("""power""" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("""Invalid power""" )
_A = state_dict.get("""shadow_params""" , lowerCAmelCase_ )
if shadow_params is not None:
_A = shadow_params
if not isinstance(self.shadow_params , lowerCAmelCase_ ):
raise ValueError("""shadow_params must be a list""" )
if not all(isinstance(lowerCAmelCase_ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("""shadow_params must all be Tensors""" )
| 83
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def snake_case ( snake_case__ :str , snake_case__ :str , snake_case__ :str , snake_case__ :PreTrainedTokenizer , snake_case__ :int , snake_case__ :Optional[int] = None , ) -> Optional[int]:
_A = {}
if train_file is not None:
_A = [train_file]
if eval_file is not None:
_A = [eval_file]
if test_file is not None:
_A = [test_file]
_A = datasets.load_dataset("""csv""" , data_files=snake_case__)
_A = list(ds[list(files.keys())[0]].features.keys())
_A = features_name.pop(snake_case__)
_A = list(set(ds[list(files.keys())[0]][label_name]))
_A = {label: i for i, label in enumerate(snake_case__)}
_A = tokenizer.model_input_names
_A = {}
if len(snake_case__) == 1:
for k in files.keys():
_A = ds[k].map(
lambda snake_case__: tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=snake_case__ , max_length=snake_case__ , padding="""max_length""") , batched=snake_case__ , )
elif len(snake_case__) == 2:
for k in files.keys():
_A = ds[k].map(
lambda snake_case__: tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=snake_case__ , max_length=snake_case__ , padding="""max_length""" , ) , batched=snake_case__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
_A = {k: v for k, v in ex.items() if k in input_names}
_A = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
_A = {k: v for k, v in ex.items() if k in input_names}
_A = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
_A = {k: v for k, v in ex.items() if k in input_names}
_A = labelaid[ex[label_name]]
yield (d, label)
_A = (
tf.data.Dataset.from_generator(
snake_case__ , ({k: tf.int64 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
_A = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
_A = (
tf.data.Dataset.from_generator(
snake_case__ , ({k: tf.int64 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
_A = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
_A = (
tf.data.Dataset.from_generator(
snake_case__ , ({k: tf.int64 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
_A = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
return train_ds, val_ds, test_ds, labelaid
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
class a :
"""simple docstring"""
lowerCamelCase :int = field(metadata={'''help''': '''Which column contains the label'''} )
lowerCamelCase :str = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the training file'''} )
lowerCamelCase :Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the development file'''} )
lowerCamelCase :Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the test file'''} )
lowerCamelCase :int = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
lowerCamelCase :bool = field(
default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class a :
"""simple docstring"""
lowerCamelCase :str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
lowerCamelCase :Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
lowerCamelCase :Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
lowerCamelCase :bool = field(default=__lowerCAmelCase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowerCamelCase :Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def snake_case ( ) -> int:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_A = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
_A , _A , _A = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""")
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(
F'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, '''
F'''16-bits training: {training_args.fp16}''')
logger.info(F'''Training/evaluation parameters {training_args}''')
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_A = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_A , _A , _A , _A = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=snake_case__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
_A = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(snake_case__) , labelaid=snake_case__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="""text-classification""" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
_A = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(""".bin""" in model_args.model_name_or_path) , config=snake_case__ , cache_dir=model_args.cache_dir , )
def compute_metrics(snake_case__ :EvalPrediction) -> Dict:
_A = np.argmax(p.predictions , axis=1)
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
_A = TFTrainer(
model=snake_case__ , args=snake_case__ , train_dataset=snake_case__ , eval_dataset=snake_case__ , compute_metrics=snake_case__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
_A = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""")
_A = trainer.evaluate()
_A = os.path.join(training_args.output_dir , """eval_results.txt""")
with open(snake_case__ , """w""") as writer:
logger.info("""***** Eval results *****""")
for key, value in result.items():
logger.info(F''' {key} = {value}''')
writer.write(F'''{key} = {value}\n''')
results.update(snake_case__)
return results
if __name__ == "__main__":
main()
| 83
| 1
|
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
SCREAMING_SNAKE_CASE__ : Any = logging.getLogger(__name__)
SCREAMING_SNAKE_CASE__ : int = 5_0 # max width of layer names
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 7_0 # max width of quantizer names
def _A ( lowerCamelCase ):
a__ : Dict = parser.add_argument_group("quant_trainer arguments" )
group.add_argument("--wprec" , type=lowercase__ , default=8 , help="weight precision" )
group.add_argument("--aprec" , type=lowercase__ , default=8 , help="activation precision" )
group.add_argument("--quant-per-tensor" , action="store_true" , help="per tensor weight scaling" )
group.add_argument("--quant-disable" , action="store_true" , help="disable all quantizers" )
group.add_argument("--quant-disable-embeddings" , action="store_true" , help="disable all embeddings quantizers" )
group.add_argument("--quant-disable-keyword" , type=lowercase__ , nargs="+" , help="disable quantizers by keyword" )
group.add_argument("--quant-disable-layer-module" , type=lowercase__ , help="disable quantizers by keyword under layer." )
group.add_argument("--quant-enable-layer-module" , type=lowercase__ , help="enable quantizers by keyword under layer" )
group.add_argument("--calibrator" , default="max" , help="which quantization range calibrator to use" )
group.add_argument("--percentile" , default=lowercase__ , type=lowercase__ , help="percentile for PercentileCalibrator" )
group.add_argument("--fuse-qkv" , action="store_true" , help="use the same scale factor for qkv" )
group.add_argument("--clip-gelu" , metavar="N" , type=lowercase__ , help="clip gelu output maximum value to N" )
group.add_argument(
"--recalibrate-weights" , action="store_true" , help=(
"recalibrate weight amaxes by taking the max of the weights."
" amaxes will be computed with the current quantization granularity (axis)."
) , )
def _A ( lowerCamelCase ):
if args.calibrator == "max":
a__ : Optional[Any] = "max"
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("Specify --percentile when using percentile calibrator" )
a__ : int = "histogram"
elif args.calibrator == "mse":
a__ : Union[str, Any] = "histogram"
else:
raise ValueError(F"""Invalid calibrator {args.calibrator}""" )
a__ : int = QuantDescriptor(num_bits=args.aprec , calib_method=lowercase__ )
a__ : Any = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(lowercase__ )
quant_nn.QuantLinear.set_default_quant_desc_weight(lowercase__ )
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase=False , lowerCamelCase=False ):
logger.info("Configuring Model for Quantization" )
logger.info(F"""using quantization package {pytorch_quantization.__file__}""" )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(lowercase__ , ["embeddings"] , which="weight" , _disabled=lowercase__ )
if args.quant_disable:
set_quantizer_by_name(lowercase__ , [""] , _disabled=lowercase__ )
if args.quant_disable_keyword:
set_quantizer_by_name(lowercase__ , args.quant_disable_keyword , _disabled=lowercase__ )
if args.quant_disable_layer_module:
set_quantizer_by_name(lowercase__ , [r"layer.\d+." + args.quant_disable_layer_module] , _disabled=lowercase__ )
if args.quant_enable_layer_module:
set_quantizer_by_name(lowercase__ , [r"layer.\d+." + args.quant_enable_layer_module] , _disabled=lowercase__ )
if args.recalibrate_weights:
recalibrate_weights(lowercase__ )
if args.fuse_qkv:
fuse_qkv(lowercase__ , lowercase__ )
if args.clip_gelu:
clip_gelu(lowercase__ , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(lowercase__ )
def _A ( lowerCamelCase ):
logger.info("Enabling Calibration" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F"""{name:80}: {module}""" )
def _A ( lowerCamelCase , lowerCamelCase ):
logger.info("Loading calibrated amax" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("percentile" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(lowercase__ )
def _A ( lowerCamelCase , lowerCamelCase ):
def fusea(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
for mod in [qq, qk, qv]:
if not hasattr(lowercase__ , "_amax" ):
print(" WARNING: NO AMAX BUFFER" )
return
a__ : Optional[int] = qq._amax.detach().item()
a__ : Optional[int] = qk._amax.detach().item()
a__ : List[Any] = qv._amax.detach().item()
a__ : List[Any] = max(lowercase__ , lowercase__ , lowercase__ )
qq._amax.fill_(lowercase__ )
qk._amax.fill_(lowercase__ )
qv._amax.fill_(lowercase__ )
logger.info(F""" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}""" )
for name, mod in model.named_modules():
if name.endswith(".attention.self" ):
logger.info(F"""FUSE_QKV: {name:{name_width}}""" )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def _A ( lowerCamelCase , lowerCamelCase ):
for name, mod in model.named_modules():
if name.endswith(".output.dense" ) and not name.endswith("attention.output.dense" ):
a__ : Union[str, Any] = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=lowercase__ )
a__ : Optional[Any] = mod._input_quantizer._amax.data.detach().item()
logger.info(F"""CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}""" )
def _A ( lowerCamelCase ):
for name, mod in model.named_modules():
if hasattr(lowercase__ , "_weight_quantizer" ) and mod._weight_quantizer.axis is not None:
a__ : Optional[Any] = mod.weight.shape[0]
a__ : Optional[Any] = mod._weight_quantizer._amax.detach()
a__ : Union[str, Any] = torch.ones(lowercase__ , dtype=amax.dtype , device=amax.device ) * amax
print(F"""expanding {name} {amax} -> {mod._weight_quantizer._amax}""" )
def _A ( lowerCamelCase ):
for name, mod in model.named_modules():
if hasattr(lowercase__ , "_weight_quantizer" ):
if not hasattr(mod._weight_quantizer , "_amax" ):
print(F"""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
a__ : Any = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
a__ : Any = set(range(len(mod.weight.size() ) ) ) - axis_set
a__ : Dict = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowercase__ , keepdims=lowercase__ ).detach()
logger.info(F"""RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}""" )
a__ : Tuple = amax
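# Axis arithmetic sketch for the reduction above (hypothetical shape): a weight
# of shape (64, 3, 3, 3) quantized per axis 0 has axis_set = {0}, so amax is
# reduced over axes {1, 2, 3}, leaving one scale per output channel.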
def _A ( lowerCamelCase , lowerCamelCase=25 , lowerCamelCase=180 , lowerCamelCase=None ):
if ignore is None:
a__ : Optional[Any] = []
elif not isinstance(lowercase__ , lowercase__ ):
a__ : Optional[Any] = [ignore]
a__ : Optional[Any] = 0
for name, mod in model.named_modules():
if not hasattr(lowercase__ , "weight" ):
continue
a__ : Dict = max(lowercase__ , len(lowercase__ ) )
for name, mod in model.named_modules():
a__ : List[Any] = getattr(lowercase__ , "_input_quantizer" , lowercase__ )
a__ : Optional[int] = getattr(lowercase__ , "_weight_quantizer" , lowercase__ )
if not hasattr(lowercase__ , "weight" ):
continue
if type(lowercase__ ) in ignore:
continue
if [True for s in ignore if type(lowercase__ ) is str and s in name]:
continue
a__ : Optional[int] = F"""Act:{input_q.extra_repr()}"""
a__ : str = F"""Wgt:{weight_q.extra_repr()}"""
a__ : List[str] = F"""{name:{name_width}} {act_str} {wgt_str}"""
if len(lowercase__ ) <= line_width:
logger.info(lowercase__ )
else:
logger.info(F"""{name:{name_width}} {act_str}""" )
logger.info(F"""{" ":{name_width}} {wgt_str}""" )
def _A ( lowerCamelCase ):
a__ : int = 0
for name, mod in model.named_modules():
if isinstance(lowercase__ , pytorch_quantization.nn.TensorQuantizer ):
print(F"""{name:80} {mod}""" )
count += 1
print(F"""{count} TensorQuantizers found in model""" )
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
a__ : Union[str, Any] = getattr(lowercase__ , lowercase__ , lowercase__ )
if quantizer_mod is not None:
assert hasattr(lowercase__ , lowercase__ )
setattr(lowercase__ , lowercase__ , lowercase__ )
else:
logger.warning(F"""{name} has no {quantizer}""" )
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase="both" , **lowerCamelCase ):
a__ : Tuple = F"""Warning: changing {which} quantizers of {name:{qname_width}}"""
for k, v in kwargs.items():
s += F""" {k}={v}"""
if which in ["input", "both"]:
set_quantizer(lowercase__ , lowercase__ , "_input_quantizer" , lowercase__ , lowercase__ )
if which in ["weight", "both"]:
set_quantizer(lowercase__ , lowercase__ , "_weight_quantizer" , lowercase__ , lowercase__ )
logger.info(lowercase__ )
def _A ( lowerCamelCase , lowerCamelCase , **lowerCamelCase ):
for name, mod in model.named_modules():
if hasattr(lowercase__ , "_input_quantizer" ) or hasattr(lowercase__ , "_weight_quantizer" ):
for n in names:
if re.search(lowercase__ , lowercase__ ):
set_quantizers(lowercase__ , lowercase__ , **lowercase__ )
elif name.endswith("_quantizer" ):
for n in names:
if re.search(lowercase__ , lowercase__ ):
a__ : Optional[Any] = F"""Warning: changing {name:{name_width}}"""
for k, v in kwargs.items():
s += F""" {k}={v}"""
setattr(lowercase__ , lowercase__ , lowercase__ )
logger.info(lowercase__ )
| 112
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def a(lowercase__ ):
'''simple docstring'''
snake_case_ = 384
snake_case_ = 7
if "tiny" in model_name:
snake_case_ = 96
snake_case_ = (2, 2, 6, 2)
snake_case_ = (3, 6, 12, 24)
elif "small" in model_name:
snake_case_ = 96
snake_case_ = (2, 2, 18, 2)
snake_case_ = (3, 6, 12, 24)
elif "base" in model_name:
snake_case_ = 128
snake_case_ = (2, 2, 18, 2)
snake_case_ = (4, 8, 16, 32)
snake_case_ = 12
snake_case_ = 512
elif "large" in model_name:
snake_case_ = 192
snake_case_ = (2, 2, 18, 2)
snake_case_ = (6, 12, 24, 48)
snake_case_ = 12
snake_case_ = 768
# set label information
snake_case_ = 150
snake_case_ = 'huggingface/label-files'
snake_case_ = 'ade20k-id2label.json'
snake_case_ = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='dataset' ) , 'r' ) )
snake_case_ = {int(lowercase__ ): v for k, v in idalabel.items()}
snake_case_ = {v: k for k, v in idalabel.items()}
snake_case_ = SwinConfig(
embed_dim=lowercase__ , depths=lowercase__ , num_heads=lowercase__ , window_size=lowercase__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
snake_case_ = UperNetConfig(
backbone_config=lowercase__ , auxiliary_in_channels=lowercase__ , num_labels=lowercase__ , idalabel=lowercase__ , labelaid=lowercase__ , )
return config
def a(lowercase__ ):
'''simple docstring'''
snake_case_ = []
# fmt: off
# stem
rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.stages.{i}.downsample.reduction.weight""", f"""backbone.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.weight""", f"""backbone.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.bias""", f"""backbone.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def a(lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
snake_case_ = dct.pop(lowercase__ )
snake_case_ = val
def a(lowercase__ , lowercase__ ):
'''simple docstring'''
snake_case_ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
snake_case_ = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
snake_case_ = state_dict.pop(f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight""" )
snake_case_ = state_dict.pop(f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
snake_case_ = in_proj_weight[:dim, :]
snake_case_ = in_proj_bias[: dim]
snake_case_ = in_proj_weight[
dim : dim * 2, :
]
snake_case_ = in_proj_bias[
dim : dim * 2
]
snake_case_ = in_proj_weight[
-dim :, :
]
snake_case_ = in_proj_bias[-dim :]
# fmt: on
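# Shape sketch for the split above (illustrative): the fused qkv weight has
# shape (3 * dim, dim); rows [0:dim] become the query projection, rows
# [dim:2*dim] the key projection and rows [-dim:] the value projection,
# mirroring the slicing performed in the loop.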
def a(lowercase__ ):
'''simple docstring'''
snake_case_ , snake_case_ = x.shape
snake_case_ = x.reshape(lowercase__ , 4 , in_channel // 4 )
snake_case_ = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(lowercase__ , lowercase__ )
return x
def a(lowercase__ ):
'''simple docstring'''
snake_case_ , snake_case_ = x.shape
snake_case_ = x.reshape(lowercase__ , in_channel // 4 , 4 )
snake_case_ = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(lowercase__ , lowercase__ )
return x
def a(lowercase__ ):
'''simple docstring'''
snake_case_ = x.shape[0]
snake_case_ = x.reshape(4 , in_channel // 4 )
snake_case_ = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(lowercase__ )
return x
def a(lowercase__ ):
'''simple docstring'''
snake_case_ = x.shape[0]
snake_case_ = x.reshape(in_channel // 4 , 4 )
snake_case_ = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(lowercase__ )
return x
def a(lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
snake_case_ = {
'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth',
'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth',
'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth',
'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth',
}
snake_case_ = model_name_to_url[model_name]
snake_case_ = torch.hub.load_state_dict_from_url(lowercase__ , map_location='cpu' , file_name=lowercase__ )[
'state_dict'
]
for name, param in state_dict.items():
print(lowercase__ , param.shape )
snake_case_ = get_upernet_config(lowercase__ )
snake_case_ = UperNetForSemanticSegmentation(lowercase__ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
snake_case_ = state_dict.pop(lowercase__ )
if "bn" in key:
snake_case_ = key.replace('bn' , 'batch_norm' )
snake_case_ = val
# rename keys
snake_case_ = create_rename_keys(lowercase__ )
for src, dest in rename_keys:
rename_key(lowercase__ , lowercase__ , lowercase__ )
read_in_q_k_v(lowercase__ , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
snake_case_ = reverse_correct_unfold_reduction_order(lowercase__ )
if "norm" in key:
snake_case_ = reverse_correct_unfold_norm_order(lowercase__ )
model.load_state_dict(lowercase__ )
# verify on image
snake_case_ = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
snake_case_ = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ).convert('RGB' )
snake_case_ = SegformerImageProcessor()
snake_case_ = processor(lowercase__ , return_tensors='pt' ).pixel_values
with torch.no_grad():
snake_case_ = model(lowercase__ )
snake_case_ = outputs.logits
print(logits.shape )
print('First values of logits:' , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
snake_case_ = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
elif model_name == "upernet-swin-small":
snake_case_ = torch.tensor(
[[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
elif model_name == "upernet-swin-base":
snake_case_ = torch.tensor(
[[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
elif model_name == "upernet-swin-large":
snake_case_ = torch.tensor(
[[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowercase__ , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase__ )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(lowercase__ )
if push_to_hub:
print(f"""Pushing model and processor for {model_name} to hub""" )
model.push_to_hub(f"""openmmlab/{model_name}""" )
processor.push_to_hub(f"""openmmlab/{model_name}""" )
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[f"""upernet-swin-{size}""" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
A = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 187
| 0
|
'''simple docstring'''
import os
def __lowerCamelCase ( ):
'''simple docstring'''
with open(os.path.dirname(__file__ ) + '''/grid.txt''' ) as f:
UpperCAmelCase_ = [] # noqa: E741
for _ in range(20 ):
l.append([int(_UpperCamelCase ) for x in f.readline().split()] )
UpperCAmelCase_ = 0
# right
for i in range(20 ):
for j in range(17 ):
UpperCAmelCase_ = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
UpperCAmelCase_ = temp
# down
for i in range(17 ):
for j in range(20 ):
UpperCAmelCase_ = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
UpperCAmelCase_ = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
UpperCAmelCase_ = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
UpperCAmelCase_ = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
UpperCAmelCase_ = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
UpperCAmelCase_ = temp
return maximum
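# Each pass above multiplies 4 adjacent cells, e.g. the "right" pass at (i, j)
# covers l[i][j] .. l[i][j + 3], which is why j only runs over range(17) in a
# 20-column grid.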
if __name__ == "__main__":
print(solution())
| 703
|
'''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
lowercase__ : Dict = logging.get_logger(__name__)
lowercase__ : List[Any] = "T5Config"
class lowerCamelCase ( TFTaModel ):
'''simple docstring'''
lowerCAmelCase__ = '''mt5'''
lowerCAmelCase__ = MTaConfig
class lowerCamelCase ( TFTaForConditionalGeneration ):
'''simple docstring'''
lowerCAmelCase__ = '''mt5'''
lowerCAmelCase__ = MTaConfig
class lowerCamelCase ( TFTaEncoderModel ):
'''simple docstring'''
lowerCAmelCase__ = '''mt5'''
lowerCAmelCase__ = MTaConfig
| 43
| 0
|
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
UpperCamelCase = re.compile(R'\b(a|an|the)\b', re.UNICODE)
UpperCamelCase = None
def lowerCAmelCase_ () -> Any:
a_ : str = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." )
parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." )
parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." )
parser.add_argument(
"--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." )
parser.add_argument(
"--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." )
parser.add_argument(
"--na-prob-thresh" , "-t" , type=lowercase__ , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
parser.add_argument(
"--out-image-dir" , "-p" , metavar="out_images" , default=lowercase__ , help="Save precision-recall curves to directory." )
parser.add_argument("--verbose" , "-v" , action="store_true" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :int ) -> int:
a_ : Dict = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
a_ : Optional[Any] = bool(qa["answers"]["text"] )
return qid_to_has_ans
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :List[str] ) -> Union[str, Any]:
def remove_articles(_SCREAMING_SNAKE_CASE :Union[str, Any] ):
return ARTICLES_REGEX.sub(" " , lowercase__ )
def white_space_fix(_SCREAMING_SNAKE_CASE :int ):
return " ".join(text.split() )
def remove_punc(_SCREAMING_SNAKE_CASE :Tuple ):
a_ : Dict = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_SCREAMING_SNAKE_CASE :Union[str, Any] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowercase__ ) ) ) )
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :List[str] ) -> Union[str, Any]:
if not s:
return []
return normalize_answer(lowercase__ ).split()
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :int , _SCREAMING_SNAKE_CASE :Any ) -> List[str]:
return int(normalize_answer(lowercase__ ) == normalize_answer(lowercase__ ) )
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :str , _SCREAMING_SNAKE_CASE :List[Any] ) -> Dict:
a_ : int = get_tokens(lowercase__ )
a_ : List[str] = get_tokens(lowercase__ )
a_ : Tuple = collections.Counter(lowercase__ ) & collections.Counter(lowercase__ )
a_ : int = sum(common.values() )
if len(lowercase__ ) == 0 or len(lowercase__ ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
a_ : Tuple = 1.0 * num_same / len(lowercase__ )
a_ : Tuple = 1.0 * num_same / len(lowercase__ )
a_ : int = (2 * precision * recall) / (precision + recall)
return fa
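# Worked example for the token F1 above (made-up strings): gold "the cat sat"
# vs. prediction "the cat ran" share 2 of 3 tokens, so precision = recall = 2/3
# and F1 = (2 * 2/3 * 2/3) / (2/3 + 2/3) = 2/3.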
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :Tuple , _SCREAMING_SNAKE_CASE :List[str] ) -> Optional[int]:
a_ : Optional[Any] = {}
a_ : int = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
a_ : Any = qa['''id''']
a_ : List[Any] = [t for t in qa['''answers''']['''text'''] if normalize_answer(lowercase__ )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
a_ : List[Any] = ['''''']
if qid not in preds:
print(F'''Missing prediction for {qid}''' )
continue
a_ : int = preds[qid]
# Take max over all gold answers
a_ : Optional[int] = max(compute_exact(lowercase__ , lowercase__ ) for a in gold_answers )
a_ : List[Any] = max(compute_fa(lowercase__ , lowercase__ ) for a in gold_answers )
return exact_scores, fa_scores
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :str , _SCREAMING_SNAKE_CASE :Optional[int] , _SCREAMING_SNAKE_CASE :Optional[int] , _SCREAMING_SNAKE_CASE :List[str] ) -> str:
a_ : List[Any] = {}
for qid, s in scores.items():
a_ : Union[str, Any] = na_probs[qid] > na_prob_thresh
if pred_na:
a_ : str = float(not qid_to_has_ans[qid] )
else:
a_ : str = s
return new_scores
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :Union[str, Any] , _SCREAMING_SNAKE_CASE :List[str] , _SCREAMING_SNAKE_CASE :Any=None ) -> int:
if not qid_list:
a_ : Optional[Any] = len(lowercase__ )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores.values() ) / total),
("f1", 100.0 * sum(fa_scores.values() ) / total),
("total", total),
] )
else:
a_ : Optional[int] = len(lowercase__ )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("total", total),
] )
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :Union[str, Any] , _SCREAMING_SNAKE_CASE :Union[str, Any] , _SCREAMING_SNAKE_CASE :List[Any] ) -> List[Any]:
for k in new_eval:
a_ : Dict = new_eval[k]
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :List[Any] , _SCREAMING_SNAKE_CASE :Any , _SCREAMING_SNAKE_CASE :Optional[int] , _SCREAMING_SNAKE_CASE :List[Any] ) -> Tuple:
plt.step(lowercase__ , lowercase__ , color="b" , alpha=0.2 , where="post" )
plt.fill_between(lowercase__ , lowercase__ , step="post" , alpha=0.2 , color="b" )
plt.xlabel("Recall" )
plt.ylabel("Precision" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(lowercase__ )
plt.savefig(lowercase__ )
plt.clf()
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :Optional[int] , _SCREAMING_SNAKE_CASE :List[str] , _SCREAMING_SNAKE_CASE :List[str] , _SCREAMING_SNAKE_CASE :Optional[Any] , _SCREAMING_SNAKE_CASE :Tuple=None , _SCREAMING_SNAKE_CASE :Any=None ) -> Any:
a_ : int = sorted(lowercase__ , key=lambda _SCREAMING_SNAKE_CASE : na_probs[k] )
a_ : Union[str, Any] = 0.0
a_ : int = 1.0
a_ : Optional[int] = 0.0
a_ : List[Any] = [1.0]
a_ : Dict = [0.0]
a_ : str = 0.0
for i, qid in enumerate(lowercase__ ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
a_ : int = true_pos / float(i + 1 )
a_ : Any = true_pos / float(lowercase__ )
if i == len(lowercase__ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(lowercase__ )
recalls.append(lowercase__ )
if out_image:
plot_pr_curve(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
return {"ap": 100.0 * avg_prec}
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :Dict , _SCREAMING_SNAKE_CASE :Union[str, Any] , _SCREAMING_SNAKE_CASE :Optional[Any] , _SCREAMING_SNAKE_CASE :Union[str, Any] , _SCREAMING_SNAKE_CASE :Union[str, Any] , _SCREAMING_SNAKE_CASE :List[Any] ) -> str:
if out_image_dir and not os.path.exists(lowercase__ ):
os.makedirs(lowercase__ )
a_ : Optional[Any] = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
a_ : Union[str, Any] = make_precision_recall_eval(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , out_image=os.path.join(lowercase__ , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , )
a_ : Dict = make_precision_recall_eval(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , out_image=os.path.join(lowercase__ , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , )
a_ : str = {k: float(lowercase__ ) for k, v in qid_to_has_ans.items()}
a_ : List[Any] = make_precision_recall_eval(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , out_image=os.path.join(lowercase__ , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)" , )
merge_eval(lowercase__ , lowercase__ , "pr_exact" )
merge_eval(lowercase__ , lowercase__ , "pr_f1" )
merge_eval(lowercase__ , lowercase__ , "pr_oracle" )
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :Tuple , _SCREAMING_SNAKE_CASE :Tuple , _SCREAMING_SNAKE_CASE :List[Any] , _SCREAMING_SNAKE_CASE :List[str] ) -> Dict:
if not qid_list:
return
a_ : Optional[Any] = [na_probs[k] for k in qid_list]
a_ : Tuple = np.ones_like(lowercase__ ) / float(len(lowercase__ ) )
plt.hist(lowercase__ , weights=lowercase__ , bins=20 , range=(0.0, 1.0) )
plt.xlabel("Model probability of no-answer" )
plt.ylabel("Proportion of dataset" )
plt.title(F'''Histogram of no-answer probability: {name}''' )
plt.savefig(os.path.join(lowercase__ , F'''na_prob_hist_{name}.png''' ) )
plt.clf()
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :List[Any] , _SCREAMING_SNAKE_CASE :Dict , _SCREAMING_SNAKE_CASE :Tuple , _SCREAMING_SNAKE_CASE :Tuple ) -> List[str]:
a_ : List[str] = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
a_ : int = num_no_ans
a_ : int = cur_score
a_ : int = 0.0
    a_ : Tuple = sorted(lowercase__ , key=lambda k : na_probs[k] )
for i, qid in enumerate(lowercase__ ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
a_ : Any = scores[qid]
else:
if preds[qid]:
a_ : Dict = -1
else:
a_ : Optional[Any] = 0
cur_score += diff
if cur_score > best_score:
a_ : Tuple = cur_score
a_ : Union[str, Any] = na_probs[qid]
return 100.0 * best_score / len(lowercase__ ), best_thresh
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :List[str] , _SCREAMING_SNAKE_CASE :int , _SCREAMING_SNAKE_CASE :Union[str, Any] , _SCREAMING_SNAKE_CASE :Dict , _SCREAMING_SNAKE_CASE :Optional[int] , _SCREAMING_SNAKE_CASE :Dict ) -> Optional[int]:
    a_ , a_ = find_best_thresh(lowercase__ , lowercase__ , lowercase__ , lowercase__ )  # returns (best score, threshold)
    a_ , a_ = find_best_thresh(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
a_ : int = best_exact
a_ : Optional[int] = exact_thresh
a_ : Optional[Any] = best_fa
a_ : Optional[Any] = fa_thresh
def lowerCAmelCase_ () -> Union[str, Any]:
with open(OPTS.data_file ) as f:
a_ : Optional[int] = json.load(lowercase__ )
a_ : Any = dataset_json['''data''']
with open(OPTS.pred_file ) as f:
a_ : Any = json.load(lowercase__ )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
a_ : Dict = json.load(lowercase__ )
else:
a_ : Any = {k: 0.0 for k in preds}
a_ : str = make_qid_to_has_ans(lowercase__ ) # maps qid to True/False
a_ : str = [k for k, v in qid_to_has_ans.items() if v]
a_ : Union[str, Any] = [k for k, v in qid_to_has_ans.items() if not v]
a_ : str = get_raw_scores(lowercase__ , lowercase__ )
a_ : str = apply_no_ans_threshold(lowercase__ , lowercase__ , lowercase__ , OPTS.na_prob_thresh )
a_ : Optional[int] = apply_no_ans_threshold(lowercase__ , lowercase__ , lowercase__ , OPTS.na_prob_thresh )
a_ : Tuple = make_eval_dict(lowercase__ , lowercase__ )
if has_ans_qids:
a_ : Optional[int] = make_eval_dict(lowercase__ , lowercase__ , qid_list=lowercase__ )
merge_eval(lowercase__ , lowercase__ , "HasAns" )
if no_ans_qids:
a_ : Any = make_eval_dict(lowercase__ , lowercase__ , qid_list=lowercase__ )
merge_eval(lowercase__ , lowercase__ , "NoAns" )
if OPTS.na_prob_file:
find_all_best_thresh(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , OPTS.out_image_dir )
histogram_na_prob(lowercase__ , lowercase__ , OPTS.out_image_dir , "hasAns" )
histogram_na_prob(lowercase__ , lowercase__ , OPTS.out_image_dir , "noAns" )
if OPTS.out_file:
with open(OPTS.out_file , "w" ) as f:
json.dump(lowercase__ , lowercase__ )
else:
print(json.dumps(lowercase__ , indent=2 ) )
if __name__ == "__main__":
UpperCamelCase = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
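# Example invocation (hypothetical paths; the flag spellings are inferred from the
# OPTS attributes used above, so verify them against parse_args before relying on them):
#
#   python evaluate_squad_v2.py dev-v2.0.json predictions.json \
#       --na-prob-file na_probs.json --na-prob-thresh 1.0 \
#       --out-file eval.json --out-image-dir pr_plots/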
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class __lowercase (TestCase ):
def UpperCamelCase__ ( self ) ->List[Any]:
'''simple docstring'''
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def UpperCamelCase__ ( self ) ->Tuple:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
return Dataset.from_dict(A_ )
def UpperCamelCase__ ( self ) ->Any:
'''simple docstring'''
__lowerCAmelCase : int = self._create_example_records()
__lowerCAmelCase : Dict = Dataset.from_list(A_ )
self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''] )
for i, r in enumerate(A_ ):
self.assertDictEqual(A_ , example_records[i] )
def UpperCamelCase__ ( self ) ->Dict:
'''simple docstring'''
__lowerCAmelCase : int = self._create_example_records()
__lowerCAmelCase : Optional[Any] = Dataset.from_list(A_ )
__lowerCAmelCase : int = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def UpperCamelCase__ ( self ) ->Union[str, Any]: # checks what happens with missing columns
'''simple docstring'''
__lowerCAmelCase : List[Any] = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
__lowerCAmelCase : Union[str, Any] = Dataset.from_list(A_ )
self.assertDictEqual(dset[0] , {'''col_1''': 1} )
self.assertDictEqual(dset[1] , {'''col_1''': None} ) # NB: first record is used for columns
def UpperCamelCase__ ( self ) ->Tuple: # checks if the type can be inferred from the second record
'''simple docstring'''
__lowerCAmelCase : int = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
__lowerCAmelCase : Union[str, Any] = Dataset.from_list(A_ )
self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''' ) ) )
def UpperCamelCase__ ( self ) ->List[Any]:
'''simple docstring'''
__lowerCAmelCase : Any = Dataset.from_list([] )
self.assertEqual(len(A_ ) , 0 )
self.assertListEqual(dset.column_names , [] )
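# Minimal usage sketch of the API exercised above (assumes the `datasets`
# library is installed; the data values are hypothetical):
#
#   from datasets import Dataset
#   ds = Dataset.from_list([{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}])
#   ds.column_names   # ['col_1', 'col_2']
#   ds[0]             # {'col_1': 3, 'col_2': 'a'}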
def __lowerCamelCase ( ) -> Tuple:
UpperCamelCase = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
UpperCamelCase = 6
UpperCamelCase = 1
UpperCamelCase = 1901
UpperCamelCase = 0
while year < 2001:
day += 7
if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
UpperCamelCase = day - days_per_month[month - 2]
elif day > 29 and month == 2:
month += 1
UpperCamelCase = day - 29
else:
if day > days_per_month[month - 1]:
month += 1
UpperCamelCase = day - days_per_month[month - 2]
if month > 12:
year += 1
UpperCamelCase = 1
if year < 2001 and day == 1:
sundays += 1
return sundays
if __name__ == "__main__":
print(solution())
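# Independent cross-check of the counting logic above using the standard
# library (a sketch, not part of the original solution):
def _cross_check_with_datetime() -> int:
    import datetime

    count = 0
    for year in range(1901, 2001):
        for month in range(1, 13):
            if datetime.date(year, month, 1).weekday() == 6:  # 6 == Sunday
                count += 1
    return count  # 171, the published answer to Project Euler problem 19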
from typing import Any
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
UpperCamelCase = data
UpperCamelCase = None
def __repr__( self : Tuple ):
"""simple docstring"""
return F'Node({self.data})'
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[str] ):
"""simple docstring"""
UpperCamelCase = None
def __iter__( self : List[str] ):
"""simple docstring"""
UpperCamelCase = self.head
while node:
yield node.data
UpperCamelCase = node.next
def __len__( self : Dict ):
"""simple docstring"""
return sum(1 for _ in self )
def __repr__( self : str ):
"""simple docstring"""
return "->".join([str(SCREAMING_SNAKE_CASE__ ) for item in self] )
def __getitem__( self : str , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
UpperCamelCase = self.head
for _ in range(SCREAMING_SNAKE_CASE__ ):
UpperCamelCase = current.next
UpperCamelCase = data
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
self.insert_nth(len(self ) , SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
self.insert_nth(0 , SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
UpperCamelCase = Node(SCREAMING_SNAKE_CASE__ )
if self.head is None:
UpperCamelCase = new_node
elif index == 0:
UpperCamelCase = self.head # link new_node to head
UpperCamelCase = new_node
else:
UpperCamelCase = self.head
for _ in range(index - 1 ):
UpperCamelCase = temp.next
UpperCamelCase = temp.next
UpperCamelCase = new_node
def __lowerCAmelCase ( self : Tuple ): # print every node data
"""simple docstring"""
print(self )
def __lowerCAmelCase ( self : Tuple ):
"""simple docstring"""
return self.delete_nth(0 )
def __lowerCAmelCase ( self : str ): # delete from tail
"""simple docstring"""
return self.delete_nth(len(self ) - 1 )
def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : int = 0 ):
"""simple docstring"""
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
UpperCamelCase = self.head # default first node
if index == 0:
UpperCamelCase = self.head.next
else:
UpperCamelCase = self.head
for _ in range(index - 1 ):
UpperCamelCase = temp.next
UpperCamelCase = temp.next
UpperCamelCase = temp.next.next
return delete_node.data
def __lowerCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
return self.head is None
def __lowerCAmelCase ( self : Any ):
"""simple docstring"""
UpperCamelCase = None
UpperCamelCase = self.head
while current:
# Store the current node's next node.
UpperCamelCase = current.next
# Make the current node's next point backwards
UpperCamelCase = prev
# Make the previous node be the current node
UpperCamelCase = current
# Make the current node the next node (to progress iteration)
UpperCamelCase = next_node
# Return prev in order to put the head at the end
UpperCamelCase = prev
def __lowerCamelCase ( ) -> None:
UpperCamelCase = LinkedList()
assert linked_list.is_empty() is True
assert str(_lowercase ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(_lowercase ) == i
linked_list.insert_nth(_lowercase , i + 1 )
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(_lowercase ) == 9
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
UpperCamelCase = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(-8 , 1 ) )
def __lowerCamelCase ( ) -> None:
UpperCamelCase = [
-9,
100,
Node(77345112 ),
'dlrow olleH',
7,
5555,
0,
        -192.55555,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
UpperCamelCase = LinkedList()
for i in test_input:
linked_list.insert_tail(_lowercase )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(_lowercase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
UpperCamelCase = linked_list.delete_head()
assert result == -9
assert (
str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
UpperCamelCase = linked_list.delete_tail()
assert result == 12.2
assert (
str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
UpperCamelCase = linked_list.delete_nth(10 )
assert result is None
assert (
str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(_lowercase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(_lowercase )
assert (
str(_lowercase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(_lowercase )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def __lowerCamelCase ( ) -> Tuple:
from doctest import testmod
testmod()
UpperCamelCase = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(_lowercase )
print('\nReading/changing Node data using indexing:' )
print(F'Element at Position 1: {linked_list[1]}' )
UpperCamelCase = input('Enter New Value: ' ).strip()
print('New list:' )
print(_lowercase )
print(F'length of linked_list is : {len(_lowercase )}' )
if __name__ == "__main__":
main()
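# Pointer-rewiring trace for reverse() on a three-node list (illustrative):
#   start : head -> 1 -> 2 -> 3 -> None    prev=None,    current=Node(1)
#   step 1: 1 -> None                      prev=Node(1), current=Node(2)
#   step 2: 2 -> 1 -> None                 prev=Node(2), current=Node(3)
#   step 3: 3 -> 2 -> 1 -> None            prev=Node(3), current=None
#   finish: head is set to prev, so the list now reads 3 -> 2 -> 1.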
'''simple docstring'''
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : List[Any] ) -> Any:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
__SCREAMING_SNAKE_CASE = FlaxDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-pipe""" , safety_checker=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = [t[-1] for t in os.walk(os.path.join(__SCREAMING_SNAKE_CASE , os.listdir(__SCREAMING_SNAKE_CASE )[0] , """snapshots""" ) )]
__SCREAMING_SNAKE_CASE = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(""".bin""" ) for f in files )
@slow
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-pipe""" , safety_checker=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
__SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
__SCREAMING_SNAKE_CASE = 4
__SCREAMING_SNAKE_CASE = jax.device_count()
__SCREAMING_SNAKE_CASE = num_samples * [prompt]
__SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(__SCREAMING_SNAKE_CASE )
# shard inputs and rng
__SCREAMING_SNAKE_CASE = replicate(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = jax.random.split(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = shard(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = pipeline(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , jit=__SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1514745 ) < 1E-3
assert np.abs(np.abs(__SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 49947.875 ) < 5E-1
__SCREAMING_SNAKE_CASE = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(__SCREAMING_SNAKE_CASE ) == num_samples
def UpperCAmelCase__ ( self : Dict ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""flax""" , safety_checker=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
__SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
__SCREAMING_SNAKE_CASE = 50
__SCREAMING_SNAKE_CASE = jax.device_count()
__SCREAMING_SNAKE_CASE = num_samples * [prompt]
__SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(__SCREAMING_SNAKE_CASE )
# shard inputs and rng
__SCREAMING_SNAKE_CASE = replicate(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = jax.random.split(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = shard(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = pipeline(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , jit=__SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05652401) ) < 1E-3
assert np.abs((np.abs(__SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2383808.2) ) < 5E-1
def UpperCAmelCase__ ( self : Dict ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
__SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
__SCREAMING_SNAKE_CASE = 50
__SCREAMING_SNAKE_CASE = jax.device_count()
__SCREAMING_SNAKE_CASE = num_samples * [prompt]
__SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(__SCREAMING_SNAKE_CASE )
# shard inputs and rng
__SCREAMING_SNAKE_CASE = replicate(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = jax.random.split(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = shard(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = pipeline(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , jit=__SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1E-3
assert np.abs((np.abs(__SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5E-1
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa )
__SCREAMING_SNAKE_CASE = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
__SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
__SCREAMING_SNAKE_CASE = 50
__SCREAMING_SNAKE_CASE = jax.device_count()
__SCREAMING_SNAKE_CASE = num_samples * [prompt]
__SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(__SCREAMING_SNAKE_CASE )
# shard inputs and rng
__SCREAMING_SNAKE_CASE = replicate(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = jax.random.split(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = shard(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = pipeline(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , jit=__SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1E-3
assert np.abs((np.abs(__SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5E-1
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = FlaxDDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , set_alpha_to_one=__SCREAMING_SNAKE_CASE , steps_offset=1 , )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = scheduler.create_state()
__SCREAMING_SNAKE_CASE = scheduler_state
__SCREAMING_SNAKE_CASE = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
__SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
__SCREAMING_SNAKE_CASE = 50
__SCREAMING_SNAKE_CASE = jax.device_count()
__SCREAMING_SNAKE_CASE = num_samples * [prompt]
__SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(__SCREAMING_SNAKE_CASE )
# shard inputs and rng
__SCREAMING_SNAKE_CASE = replicate(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = jax.random.split(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = shard(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = pipeline(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , jit=__SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045043945) ) < 1E-3
assert np.abs((np.abs(__SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2347693.5) ) < 5E-1
def UpperCAmelCase__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
__SCREAMING_SNAKE_CASE = jax.device_count()
__SCREAMING_SNAKE_CASE = num_samples * [prompt]
__SCREAMING_SNAKE_CASE = jax.random.split(jax.random.PRNGKey(0 ) , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = replicate(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = shard(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = pipeline(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , jit=__SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
__SCREAMING_SNAKE_CASE = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=__SCREAMING_SNAKE_CASE , use_memory_efficient_attention=__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = replicate(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = shard(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = pipeline(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , jit=__SCREAMING_SNAKE_CASE ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        __SCREAMING_SNAKE_CASE = images_eff[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
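# The replicate/shard/jit pattern used throughout these tests, reduced to its
# core (a sketch; assumes `pipeline` and `params` come from from_pretrained):
#
#   params = replicate(params)                        # copy weights to every device
#   rng = jax.random.split(rng, jax.device_count())   # one PRNG key per device
#   prompt_ids = shard(prompt_ids)                    # add a leading device axis
#   images = pipeline(prompt_ids, params, rng, num_inference_steps, jit=True).images
#
# With jit=True the pipeline pmaps generation across devices, which is why the
# outputs above carry the extra leading axis in their (devices, 1, H, W, 3) shapes.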
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
UpperCAmelCase : Optional[Any] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
UpperCAmelCase : Any = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {}
with open(a__ , """r""" ) as file:
for line_number, line in enumerate(a__ ):
__SCREAMING_SNAKE_CASE = line.strip()
if line:
__SCREAMING_SNAKE_CASE = line.split()
__SCREAMING_SNAKE_CASE = line_number
__SCREAMING_SNAKE_CASE = words[0]
__SCREAMING_SNAKE_CASE = value
return result
def a__ ( a__ , a__ , a__ , a__ , a__ ):
"""simple docstring"""
for attribute in key.split(""".""" ):
__SCREAMING_SNAKE_CASE = getattr(a__ , a__ )
__SCREAMING_SNAKE_CASE = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(a__ ):
__SCREAMING_SNAKE_CASE = PARAM_MAPPING[full_name.split(""".""" )[-1]]
__SCREAMING_SNAKE_CASE = """param"""
if weight_type is not None and weight_type != "param":
__SCREAMING_SNAKE_CASE = getattr(a__ , a__ ).shape
elif weight_type is not None and weight_type == "param":
__SCREAMING_SNAKE_CASE = hf_pointer
for attribute in hf_param_name.split(""".""" ):
__SCREAMING_SNAKE_CASE = getattr(a__ , a__ )
__SCREAMING_SNAKE_CASE = shape_pointer.shape
# let's reduce dimension
__SCREAMING_SNAKE_CASE = value[0]
else:
__SCREAMING_SNAKE_CASE = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}' )
if weight_type == "weight":
__SCREAMING_SNAKE_CASE = value
elif weight_type == "weight_g":
__SCREAMING_SNAKE_CASE = value
elif weight_type == "weight_v":
__SCREAMING_SNAKE_CASE = value
elif weight_type == "bias":
__SCREAMING_SNAKE_CASE = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
__SCREAMING_SNAKE_CASE = getattr(a__ , a__ )
__SCREAMING_SNAKE_CASE = value
else:
__SCREAMING_SNAKE_CASE = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
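# Illustrative sketch of the dotted-attribute traversal used by the setter above
# (hypothetical toy classes, not part of the conversion script):
def _demo_nested_getattr():
    class Conv:
        weight = "conv-weight"

    class Extractor:
        conv = Conv()

    pointer = Extractor()
    for attribute in "conv.weight".split("."):
        pointer = getattr(pointer, attribute)
    assert pointer == "conv-weight"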
def a__ ( a__ , a__ , a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(a__ ):
__SCREAMING_SNAKE_CASE = PARAM_MAPPING[full_name.split(""".""" )[-1]]
__SCREAMING_SNAKE_CASE = """param"""
if weight_type is not None and weight_type != "param":
__SCREAMING_SNAKE_CASE = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__SCREAMING_SNAKE_CASE = """.""".join([key, hf_param_name] )
else:
__SCREAMING_SNAKE_CASE = key
__SCREAMING_SNAKE_CASE = value if """lm_head""" in full_key else value[0]
UpperCAmelCase : Any = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def a__ ( a__ , a__ , a__=None , a__=None ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = False
for key, mapped_key in MAPPING.items():
__SCREAMING_SNAKE_CASE = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__SCREAMING_SNAKE_CASE = True
if "*" in mapped_key:
__SCREAMING_SNAKE_CASE = name.split(a__ )[0].split(""".""" )[-2]
__SCREAMING_SNAKE_CASE = mapped_key.replace("""*""" , a__ )
if "weight_g" in name:
__SCREAMING_SNAKE_CASE = """weight_g"""
elif "weight_v" in name:
__SCREAMING_SNAKE_CASE = """weight_v"""
elif "bias" in name:
__SCREAMING_SNAKE_CASE = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__SCREAMING_SNAKE_CASE = """weight"""
else:
__SCREAMING_SNAKE_CASE = None
if hf_dict is not None:
rename_dict(a__ , a__ , a__ , a__ , a__ )
else:
set_recursively(a__ , a__ , a__ , a__ , a__ )
return is_used
return is_used
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = fairseq_model.state_dict()
__SCREAMING_SNAKE_CASE = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__SCREAMING_SNAKE_CASE = False
if "conv_layers" in name:
load_conv_layer(
a__ , a__ , a__ , a__ , hf_model.config.feat_extract_norm == """group""" , )
__SCREAMING_SNAKE_CASE = True
else:
__SCREAMING_SNAKE_CASE = load_wavaveca_layer(a__ , a__ , a__ )
if not is_used:
unused_weights.append(a__ )
logger.warning(F'Unused weights: {unused_weights}' )
def a__ ( a__ , a__ , a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = full_name.split("""conv_layers.""" )[-1]
__SCREAMING_SNAKE_CASE = name.split(""".""" )
__SCREAMING_SNAKE_CASE = int(items[0] )
__SCREAMING_SNAKE_CASE = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
__SCREAMING_SNAKE_CASE = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
__SCREAMING_SNAKE_CASE = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
__SCREAMING_SNAKE_CASE = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
__SCREAMING_SNAKE_CASE = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(a__ )
@torch.no_grad()
def a__ ( a__ , a__ , a__=None , a__=None , a__=True , a__=False ):
"""simple docstring"""
if config_path is not None:
__SCREAMING_SNAKE_CASE = WavaVecaConfig.from_pretrained(a__ )
else:
__SCREAMING_SNAKE_CASE = WavaVecaConfig()
if is_seq_class:
__SCREAMING_SNAKE_CASE = read_txt_into_dict(a__ )
__SCREAMING_SNAKE_CASE = idalabel
__SCREAMING_SNAKE_CASE = WavaVecaForSequenceClassification(a__ )
__SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=a__ , return_attention_mask=a__ , )
feature_extractor.save_pretrained(a__ )
elif is_finetuned:
if dict_path:
__SCREAMING_SNAKE_CASE = Dictionary.load(a__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__SCREAMING_SNAKE_CASE = target_dict.pad_index
__SCREAMING_SNAKE_CASE = target_dict.bos_index
__SCREAMING_SNAKE_CASE = target_dict.eos_index
__SCREAMING_SNAKE_CASE = len(target_dict.symbols )
__SCREAMING_SNAKE_CASE = os.path.join(a__ , """vocab.json""" )
if not os.path.isdir(a__ ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(a__ ) )
return
os.makedirs(a__ , exist_ok=a__ )
__SCREAMING_SNAKE_CASE = target_dict.indices
# fairseq has the <pad> and <s> switched
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 1
with open(a__ , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(a__ , a__ )
__SCREAMING_SNAKE_CASE = WavaVecaCTCTokenizer(
a__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=a__ , )
__SCREAMING_SNAKE_CASE = True if config.feat_extract_norm == """layer""" else False
__SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=a__ , return_attention_mask=a__ , )
__SCREAMING_SNAKE_CASE = WavaVecaProcessor(feature_extractor=a__ , tokenizer=a__ )
processor.save_pretrained(a__ )
__SCREAMING_SNAKE_CASE = WavaVecaForCTC(a__ )
else:
__SCREAMING_SNAKE_CASE = WavaVecaForPreTraining(a__ )
if is_finetuned or is_seq_class:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__SCREAMING_SNAKE_CASE = argparse.Namespace(task="""audio_pretraining""" )
__SCREAMING_SNAKE_CASE = fairseq.tasks.setup_task(a__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=a__ )
__SCREAMING_SNAKE_CASE = model[0].eval()
recursively_load_weights(a__ , a__ , not is_finetuned )
hf_wavavec.save_pretrained(a__ )
if __name__ == "__main__":
UpperCAmelCase : int = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
UpperCAmelCase : Optional[Any] = parser.parse_args()
UpperCAmelCase : int = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
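# Example invocation (hypothetical paths and script name; the flag names are
# taken verbatim from the add_argument calls above):
#
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path /path/to/wav2vec_small.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-base-converted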
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
_lowerCAmelCase = {
"facebook/maskformer-swin-base-ade": (
"https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
_lowerCAmelCase = logging.get_logger(__name__)
class __A ( PretrainedConfig ):
"""simple docstring"""
A_ = 'maskformer'
A_ = {'hidden_size': 'mask_feature_size'}
A_ = ['resnet', 'swin']
A_ = ['detr']
def __init__( self , _lowerCamelCase = 2_5_6 , _lowerCamelCase = 2_5_6 , _lowerCamelCase = 0.1 , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = 0.0_2 , _lowerCamelCase = 1.0 , _lowerCamelCase = 1.0 , _lowerCamelCase = 1.0 , _lowerCamelCase = 2_0.0 , _lowerCamelCase = None , **_lowerCamelCase , )-> str:
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
lowercase__ = SwinConfig(
image_size=3_8_4 , in_channels=3 , patch_size=4 , embed_dim=1_2_8 , depths=[2, 2, 1_8, 2] , num_heads=[4, 8, 1_6, 3_2] , window_size=1_2 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase__ = backbone_config.pop('''model_type''' )
lowercase__ = CONFIG_MAPPING[backbone_model_type]
lowercase__ = config_class.from_dict(UpperCAmelCase__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '''
                f'''Supported model types: {",".join(self.backbones_supported )}''' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
lowercase__ = DetrConfig()
else:
# verify that the decoder is supported
lowercase__ = (
decoder_config.pop('''model_type''' ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f'''Transformer Decoder {decoder_type} not supported, please use one of'''
                    f''' {",".join(self.decoders_supported )}''' )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase__ = CONFIG_MAPPING[decoder_type]
lowercase__ = config_class.from_dict(UpperCAmelCase__ )
lowercase__ = backbone_config
lowercase__ = decoder_config
# main feature dimension for the model
lowercase__ = fpn_feature_size
lowercase__ = mask_feature_size
# initializer
lowercase__ = init_std
lowercase__ = init_xavier_std
# Hungarian matcher && loss
lowercase__ = cross_entropy_weight
lowercase__ = dice_weight
lowercase__ = mask_weight
lowercase__ = use_auxiliary_loss
lowercase__ = no_object_weight
lowercase__ = output_auxiliary_logits
lowercase__ = self.decoder_config.encoder_attention_heads
lowercase__ = self.decoder_config.num_hidden_layers
super().__init__(**UpperCAmelCase__ )
@classmethod
def snake_case_( cls , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )-> Dict:
return cls(
backbone_config=UpperCAmelCase__ , decoder_config=UpperCAmelCase__ , **UpperCAmelCase__ , )
def snake_case_( self )-> Dict[str, any]:
lowercase__ = copy.deepcopy(self.__dict__ )
lowercase__ = self.backbone_config.to_dict()
lowercase__ = self.decoder_config.to_dict()
lowercase__ = self.__class__.model_type
return output
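# Minimal usage sketch (the upstream class name MaskFormerConfig is used for
# clarity; the defaults follow the constructor above):
#
#   config = MaskFormerConfig()                        # Swin backbone + DETR decoder
#   config = MaskFormerConfig(mask_feature_size=512)   # override a single field
#   config.to_dict()                                   # nests backbone/decoder dicts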
'''simple docstring'''
_lowerCAmelCase = "Input must be a string of 8 numbers plus letter"
_lowerCAmelCase = "TRWAGMYFPDXBNJZSQVHLCKE"
def _lowerCAmelCase ( lowercase : str ) ->bool:
"""simple docstring"""
if not isinstance(lowercase , lowercase ):
lowercase__ = F'''Expected string as input, found {type(lowercase ).__name__}'''
raise TypeError(lowercase )
lowercase__ = spanish_id.replace('''-''' , '''''' ).upper()
if len(lowercase ) != 9:
raise ValueError(lowercase )
try:
lowercase__ = int(spanish_id_clean[0:8] )
lowercase__ = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(lowercase ) from ex
if letter.isdigit():
raise ValueError(lowercase )
return letter == LOOKUP_LETTERS[number % 2_3]
if __name__ == "__main__":
import doctest
doctest.testmod()
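# Usage sketch for the validator above (`validate` stands in for the function
# defined above; hand-checked: 12345678 % 23 == 14, and index 14 of
# LOOKUP_LETTERS is 'Z'):
#
#   validate("12345678Z")    # True
#   validate("12345678-Z")   # True  (hyphens are stripped first)
#   validate("12345678A")    # False (control letter does not match)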
'''simple docstring'''
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def __A ( lowerCAmelCase_ , lowerCAmelCase_=7 ):
_UpperCAmelCase : List[Any] = None
if token is not None:
_UpperCAmelCase : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
# The id of a workflow (not of a workflow run)
_UpperCAmelCase : str = """636036"""
_UpperCAmelCase : Any = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
_UpperCAmelCase : List[str] = requests.get(lowerCAmelCase_ , headers=lowerCAmelCase_ ).json()
return result["workflow_runs"]
def __A ( lowerCAmelCase_ ):
_UpperCAmelCase : int = get_daily_ci_runs(lowerCAmelCase_ )
_UpperCAmelCase : List[Any] = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
_UpperCAmelCase : int = workflow_run["""id"""]
break
return workflow_run_id
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : List[str] = get_last_daily_ci_runs(lowerCAmelCase_ )
if workflow_run_id is not None:
_UpperCAmelCase : int = get_artifacts_links(worflow_run_id=lowerCAmelCase_ , token=lowerCAmelCase_ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
_UpperCAmelCase : Dict = artifacts_links[artifact_name]
download_artifact(
artifact_name=lowerCAmelCase_ , artifact_url=lowerCAmelCase_ , output_dir=lowerCAmelCase_ , token=lowerCAmelCase_ )
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
get_last_daily_ci_artifacts(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_UpperCAmelCase : Any = {}
for artifact_name in artifact_names:
_UpperCAmelCase : Optional[Any] = os.path.join(lowerCAmelCase_ , f"{artifact_name}.zip" )
if os.path.isfile(lowerCAmelCase_ ):
_UpperCAmelCase : List[Any] = {}
with zipfile.ZipFile(lowerCAmelCase_ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowerCAmelCase_ ):
# read the file
with z.open(lowerCAmelCase_ ) as f:
_UpperCAmelCase : List[Any] = f.read().decode("""UTF-8""" )
return results
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
snake_case : Any = LxmertTokenizer
snake_case : Tuple = LxmertTokenizerFast
snake_case : List[str] = True
snake_case : int = True
def snake_case_ (self ):
super().setUp()
_UpperCAmelCase : Dict = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_UpperCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def snake_case_ (self , lowerCAmelCase__ ):
_UpperCAmelCase : Optional[int] = """UNwant\u00E9d,running"""
_UpperCAmelCase : str = """unwanted, running"""
return input_text, output_text
def snake_case_ (self ):
_UpperCAmelCase : Union[str, Any] = self.tokenizer_class(self.vocab_file )
_UpperCAmelCase : List[str] = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(lowerCAmelCase__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [7, 4, 5, 1_0, 8, 9] )
def snake_case_ (self ):
if not self.test_rust_tokenizer:
return
_UpperCAmelCase : Tuple = self.get_tokenizer()
_UpperCAmelCase : Any = self.get_rust_tokenizer()
_UpperCAmelCase : int = """I was born in 92000, and this is falsé."""
_UpperCAmelCase : Optional[Any] = tokenizer.tokenize(lowerCAmelCase__ )
_UpperCAmelCase : str = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : Tuple = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : Dict = self.get_rust_tokenizer()
_UpperCAmelCase : List[str] = tokenizer.encode(lowerCAmelCase__ )
_UpperCAmelCase : List[str] = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _UpperCAmelCase ( TokenizerTesterMixin ,unittest.TestCase ):
"""simple docstring"""
a_ = DebertaTokenizer
a_ = True
a_ = DebertaTokenizerFast
def _lowerCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a_ : List[str] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'[UNK]',
]
a_ : Optional[int] = dict(zip(_A , range(len(_A ) ) ) )
a_ : Any = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
a_ : int = {'unk_token': '[UNK]'}
a_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
a_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_A ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_A ) )
def _lowerCAmelCase ( self , **lowerCAmelCase_ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A )
def _lowerCAmelCase ( self , lowerCAmelCase_ ):
'''simple docstring'''
a_ : Tuple = 'lower newer'
a_ : Dict = 'lower newer'
return input_text, output_text
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : List[Any] = self.get_tokenizer()
a_ : Optional[Any] = 'lower newer'
a_ : Dict = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
a_ : Optional[int] = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
a_ : List[str] = tokens + [tokenizer.unk_token]
a_ : Dict = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , _A )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : int = self.get_tokenizer()
a_ : Any = tokenizer("""Hello""" , """World""" )
a_ : Optional[int] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd["""token_type_ids"""] , _A )
@slow
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : List[str] = self.tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
a_ : Optional[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=_A )
a_ : Optional[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_A )
a_ : Union[str, Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=_A , add_prefix_space=_A )
a_ : Optional[Any] = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=_A , add_prefix_space=_A )
a_ : int = tokenizer.build_inputs_with_special_tokens(_A )
a_ : List[Any] = tokenizer.build_inputs_with_special_tokens(_A , _A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : int = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
a_ : Dict = tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
a_ : Tuple = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
a_ : List[str] = tokenizer(_A , padding=_A )
a_ : Dict = [tokenizer.decode(_A , skip_special_tokens=_A ) for seq in encoding['input_ids']]
# fmt: off
a_ : Tuple = {
'input_ids': [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
a_ : List[str] = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
self.assertDictEqual(encoding.data , _A )
for expected, decoded in zip(_A , _A ):
self.assertEqual(_A , _A )
'''simple docstring'''
from __future__ import annotations
def _snake_case ( A_ : int ):
"""simple docstring"""
a_ : Optional[Any] = 2
a_ : int = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(A_ )
if n > 1:
factors.append(A_ )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
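# Usage sketch for the trial-division factorization above (`factors` stands in
# for the function defined above):
#
#   factors(360)   # [2, 2, 2, 3, 3, 5], since 360 = 2**3 * 3**2 * 5
#   factors(97)    # [97]; a prime maps to itself
#   factors(1)     # []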
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class lowercase__ ( PretrainedConfig ):
'''simple docstring'''
UpperCamelCase = '''vit_msn'''
def __init__( self : Optional[Any] , _UpperCAmelCase : List[str]=768 , _UpperCAmelCase : Optional[int]=12 , _UpperCAmelCase : str=12 , _UpperCAmelCase : Any=3072 , _UpperCAmelCase : Union[str, Any]="gelu" , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : int=0.0 , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : List[str]=1e-06 , _UpperCAmelCase : Any=224 , _UpperCAmelCase : str=16 , _UpperCAmelCase : List[str]=3 , _UpperCAmelCase : Dict=True , **_UpperCAmelCase : Dict , ) -> List[str]:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = qkv_bias
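# Minimal usage sketch (the upstream class name ViTMSNConfig is used for
# clarity; the defaults above mirror ViT-Base: 12 layers, hidden size 768):
#
#   config = ViTMSNConfig()                 # ViT-Base style defaults
#   config = ViTMSNConfig(image_size=384)   # override the input resolution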
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase_ : Dict = {
"configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = [
"VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"VivitModel",
"VivitPreTrainedModel",
"VivitForVideoClassification",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
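# Behavioural sketch of the lazy-import pattern above: importing the package is
# cheap because only the structure dict is registered; the heavy submodule is
# imported on first attribute access (module path assumed from this file's location):
#
#   from transformers.models import vivit   # fast: no torch/vision import yet
#   vivit.VivitModel                         # triggers the real modeling import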
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=128, max_relative_position=32, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
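# Minimal inference sketch mirroring the integration tests above (the checkpoint
# id comes from the tests; pairing it with AutoTokenizer is an assumption):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("sijunhe/nezha-cn-base")
#   model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base").eval()
#   inputs = tokenizer("你好", return_tensors="pt")
#   with torch.no_grad():
#       hidden_states = model(**inputs).last_hidden_state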
| 650
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(self, max_length: int, vocab_size: int, d_model: int, dropout_rate: float, num_layers: int, num_heads: int, d_kv: int, d_ff: int, feed_forward_proj: str, is_decoder: bool = False):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff, dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj, is_decoder=is_decoder, is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)
    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
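# Usage sketch, given an `encoder` instance of the class above (shapes and the
# vocabulary size are illustrative assumptions): integer note tokens plus a 0/1
# mask go in, contextual embeddings and the echoed mask come out:
#
#   tokens = torch.randint(0, 128, (1, 32))
#   mask = torch.ones(1, 32, dtype=torch.long)
#   embeddings, mask = encoder(encoder_input_tokens=tokens, encoder_inputs_mask=mask)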
| 650
| 1
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError('LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)')
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])

    metric = evaluate.load('glue', 'mrpc')

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
            metric.add_batch(
                predictions=predictions, references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.', )
    # New Code #
    parser.add_argument(
        '--gradient_accumulation_steps', type=int, default=1, help='The number of minibatches to be ran before gradients are accumulated.', )
    parser.add_argument(
        '--local_sgd_steps', type=int, default=8, help='Number of local SGD steps or None to disable local SGD')
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
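# Typical invocations, assuming the script is saved as `local_sgd.py` (the flag
# names are the ones defined above; `accelerate launch` is the standard
# multi-process entry point provided by the accelerate CLI):
#
#   python local_sgd.py --local_sgd_steps 8
#   accelerate launch local_sgd.py --mixed_precision fp16 --gradient_accumulation_steps 2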
| 70
|
def hamming_distance(string_a: str, string_b: str) -> int:
    """
    Count the positions at which two equal-length strings differ.

    >>> hamming_distance("python", "python")
    0
    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string_a) != len(string_b):
        raise ValueError('String lengths must match!')

    count = 0

    for char_a, char_b in zip(string_a, string_b):
        if char_a != char_b:
            count += 1

    return count
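# Equivalent one-liner for reference, same O(n) time once the length check has
# passed (zip would otherwise silently truncate the longer string):
#
#   sum(char_a != char_b for char_a, char_b in zip(string_a, string_b))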
if __name__ == "__main__":
import doctest
doctest.testmod()
| 70
| 1
|
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': DeiTModel,
            'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='DeiT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
            {'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
            {'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type['title']
                    config.num_labels = problem_type['num_labels']

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs['labels'] = inputs['labels'].unsqueeze(1).repeat(1, problem_type['num_labels'])

                    inputs['labels'] = inputs['labels'].to(problem_type['dtype'])

                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}")

                    loss.backward()
    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image


@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224')
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224').to(
            torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            'facebook/deit-base-distilled-patch16-224', torch_dtype=torch.float16, device_map='auto')
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 335
|
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        'num_images_per_prompt',
        'latents',
        'callback',
        'callback_steps',
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('DownBlock2D', 'AttnDownBlock2D'), up_block_types=('AttnUpBlock2D', 'UpBlock2D'), )
        scheduler = DDIMScheduler()
        components = {'unet': unet, 'scheduler': scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'batch_size': 1,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    def test_inference(self):
        device = 'cpu'

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1E-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3E-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3E-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = 'google/ddpm-cifar10-32'

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type='numpy').images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def test_inference_ema_bedroom(self):
        model_id = 'google/ddpm-ema-bedroom-256'

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type='numpy').images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
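# End-to-end sketch following the slow tests above (the checkpoint id is taken
# from test_inference_cifar10; loading it via DDIMPipeline.from_pretrained is an
# assumption, since the tests assemble the pipeline from its components instead):
#
#   pipe = DDIMPipeline.from_pretrained('google/ddpm-cifar10-32')
#   image = pipe(generator=torch.manual_seed(0), eta=0.0, output_type='numpy').images[0]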
| 335
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
__magic_name__ : Optional[int] = logging.get_logger(__name__)
__magic_name__ : Tuple = {
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=5_12 + 1, n_positions=32 * 32, n_embd=5_12, n_layer=24, n_head=8, n_inner=None, activation_function="quick_gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1E-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, tie_word_embeddings=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs, ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ])

    def generate_dummy_inputs(self, preprocessor: "FeatureExtractionMixin", batch_size: int = 1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, num_channels: int = 3, image_width: int = 32, image_height: int = 32, ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))

        return inputs
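# Usage sketch for the ONNX export config above (importing the image processor
# from transformers is an assumption; generate_dummy_inputs mirrors the
# signature defined in this class):
#
#   from transformers import ImageGPTImageProcessor
#   onnx_config = ImageGPTOnnxConfig(ImageGPTConfig())
#   dummy_inputs = onnx_config.generate_dummy_inputs(ImageGPTImageProcessor(), batch_size=1)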
| 281
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ : str = logging.get_logger(__name__)
__magic_name__ : List[Any] = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(self, vocab_size=30_522, hidden_size=7_68, visual_embedding_dim=5_12, num_hidden_layers=12, num_attention_heads=12, intermediate_size=30_72, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_12, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, bypass_transformer=False, special_visual_initialize=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
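# Usage sketch: visual_embedding_dim is the only field above that departs from
# the standard BERT layout, so a typical override looks like
#
#   config = VisualBertConfig(visual_embedding_dim=1024)
#   assert config.hidden_size == 768  # BERT-base defaults are preserved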
| 281
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 400
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, sample_size: int = 65536, sample_rate: Optional[int] = None, in_channels: int = 2, out_channels: int = 2, extra_in_channels: int = 0, time_embedding_type: str = "fourier", flip_sin_to_cos: bool = True, use_timestep_embedding: bool = False, freq_shift: float = 0.0, down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"), up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"), mid_block_type: str = "UNetMidBlock1D", out_block_type: str = None, block_out_channels: Tuple[int] = (32, 32, 64), act_fn: str = None, norm_num_groups: int = 8, layers_per_block: int = 1, downsample_each_block: bool = False, ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos)
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift)
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim, time_embed_dim=time_embed_dim, act_fn=act_fn, out_dim=block_out_channels[0], )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_downsample=not is_final_block or downsample_each_block, )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1], out_channels=block_out_channels[-1], embed_dim=block_out_channels[0], num_layers=layers_per_block, add_downsample=downsample_each_block, )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(up_block_types) - 1

            up_block = get_up_block(
                up_block_type, num_layers=layers_per_block, in_channels=prev_output_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_upsample=not is_final_block, )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type, num_groups_out=num_groups_out, embed_dim=block_out_channels[0], out_channels=out_channels, act_fn=act_fn, fc_dim=block_out_channels[-1] // 4, )
    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ):
        """Denoise `sample` at the given `timestep` and return the prediction."""
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)
        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))
        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples
        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)
        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)
        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)
        if not return_dict:
            return (sample,)
        return UNetaDOutput(sample=sample)
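# Usage sketch: this appears to correspond to diffusers' UNet1DModel (the block
# names and the UNet1DOutput-style return type match); treat the exact API as
# an assumption rather than a guarantee.
if __name__ == "__main__":
    import torch
    from diffusers import UNet1DModel

    model = UNet1DModel(sample_size=65536, in_channels=2, out_channels=2)
    sample = torch.randn(1, 2, 65536)  # (batch, channels, sequence length)
    timestep = torch.tensor([10])
    denoised = model(sample, timestep).sample  # same shape as `sample`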
| 400
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
"""facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
"""facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
"""facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
"""facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
"""facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
"""facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class XmodConfig(PretrainedConfig):  # name reconstructed from the file contents
    """Configuration class for X-MOD models."""
    model_type = '''xmod'''
def __init__( self , _lowercase=30_522 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3_072 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=2 , _lowercase=0.02 , _lowercase=1e-12 , _lowercase=1 , _lowercase=0 , _lowercase=2 , _lowercase="absolute" , _lowercase=True , _lowercase=None , _lowercase=False , _lowercase=2 , _lowercase=False , _lowercase=True , _lowercase=True , _lowercase=("en_XX",) , _lowercase=None , **_lowercase , ):
"""simple docstring"""
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = hidden_act
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = position_embedding_type
_lowerCAmelCase = use_cache
_lowerCAmelCase = classifier_dropout
_lowerCAmelCase = pre_norm
_lowerCAmelCase = adapter_reduction_factor
_lowerCAmelCase = adapter_layer_norm
_lowerCAmelCase = adapter_reuse_layer_norm
_lowerCAmelCase = ln_before_adapter
_lowerCAmelCase = list(_lowercase )
_lowerCAmelCase = default_language
class XmodOnnxConfig(OnnxConfig):  # name reconstructed from the file contents
    @property
    def inputs(self):
        """Map each ONNX input name to its dynamic axes."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 5
|
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:  # name reconstructed; this matches accelerate's BulletMenu
    """A CLI menu to select a choice from a list of choices using the keyboard."""
    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "
    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)
    def print_choice(self, index: int):
        """Print the choice at the given index, highlighting the current one."""
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()
    def move_direction(self, direction: Direction, num_spaces: int = 1):
        """Move the cursor up or down and redraw the affected choices."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)
    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)
    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)
    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position
    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        """Jump straight to the row whose number key was pressed."""
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return
    def run(self, default_choice: int = 0):
        """Start the menu and return the index of the selected choice."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
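# Usage sketch (illustrative; `run` needs an interactive terminal, so it is
# shown commented out). It blocks until the user confirms a choice and then
# returns the chosen index:
# menu = BulletMenu("Which device?", ["cpu", "cuda", "mps"])
# device_index = menu.run(default_choice=0)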
| 5
| 1
|
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend('''aer_simulator''')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
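# Illustrative expected output: no gates are applied before measurement, so the
# qubit stays in |0> and, on the noiseless simulator, every shot reads '0',
# i.e. single_qubit_measure(1, 1) == {'0': 1000}.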
| 712
|
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative if `deriv` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight towards `expected` and return the final output."""
    # Random starting weight (an odd integer in [1, 199])
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    expected = int(input('Expected value: '))
    number_propagations = int(input('Number of propagations: '))
    print(forward_propagation(expected, number_propagations))
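# Illustrative behaviour (mirrors the upstream doctest; not guaranteed exact):
# with enough propagations the output converges to the target, e.g.
#     res = forward_propagation(32, 450_000)
#     31 < res < 33   # True, up to the random starting weight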
| 346
| 0
|
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
__lowerCAmelCase : List[Any] = logging.getLogger()
def get_results(output_dir):
    """Load the `all_results.json` metrics file written by the example script."""
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
__lowerCAmelCase : str = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class UpperCAmelCase_ ( TestCasePlus ):
'''simple docstring'''
def _lowercase ( self : Any ) -> Tuple:
"""simple docstring"""
import xla_spawn
__magic_name__ = self.get_auto_remove_tmp_dir()
__magic_name__ = F'''
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(UpperCamelCase__ , """argv""" , UpperCamelCase__ ):
__magic_name__ = time()
xla_spawn.main()
__magic_name__ = time()
__magic_name__ = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 500 )
def _lowercase ( self : int ) -> int:
"""simple docstring"""
import xla_spawn
__magic_name__ = """
./tests/test_trainer_tpu.py
--num_cores=8
./tests/test_trainer_tpu.py
""".split()
with patch.object(UpperCamelCase__ , """argv""" , UpperCamelCase__ ):
xla_spawn.main()
| 529
|
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a*x + b*y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)
def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Return the smallest non-negative x with x = r1 (mod n1) and x = r2 (mod n2)."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
def invert_modulo(a: int, n: int) -> int:
    """Return b such that a*b = 1 (mod n)."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b
def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same as chinese_remainder_theorem, implemented via modular inverses."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='chinese_remainder_theorem', verbose=True)
testmod(name='chinese_remainder_theorem2', verbose=True)
testmod(name='invert_modulo', verbose=True)
testmod(name='extended_euclid', verbose=True)
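# Worked example (illustrative): find x with x = 1 (mod 5) and x = 3 (mod 7).
# extended_euclid(5, 7) == (3, -2) since 5*3 + 7*(-2) == 1, so
# x = 3*3*5 + 1*(-2)*7 = 31, and indeed 31 % 5 == 1 and 31 % 7 == 3:
#     chinese_remainder_theorem(5, 1, 7, 3) == 31 == chinese_remainder_theorem2(5, 1, 7, 3)
#     invert_modulo(2, 5) == 3   # because 2 * 3 == 6 == 1 (mod 5)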
| 529
| 1
|
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    print('''Googling.....''')
    url = '''https://www.google.com/search?q=''' + ''' '''.join(sys.argv[1:])
    res = requests.get(url, headers={'''User-Agent''': UserAgent().random})
    # res.raise_for_status()
    with open('''project1a.html''', '''wb''') as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, '''html.parser''')
    links = list(soup.select('''.eZt8xd'''))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('''href'''))
else:
webbrowser.open(F"""https://google.com{link.get('href')}""")
| 721
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''}
class OpenAIGPTConfig(PretrainedConfig):  # name reconstructed; matches transformers' OpenAIGPTConfig
    """Configuration for the original OpenAI GPT model."""
    model_type = '''openai-gpt'''
    attribute_map = {
        '''max_position_embeddings''': '''n_positions''',
        '''hidden_size''': '''n_embd''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
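# Example (illustrative): the attribute_map above lets generic code read
# aliased names on this config.
if __name__ == "__main__":
    config = OpenAIGPTConfig(n_layer=6, n_head=8, n_embd=512)
    print(config.num_hidden_layers)  # 6, resolved through attribute_map to n_layer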
| 408
| 0
|
"""simple docstring"""
def counting_sort(collection):
    """Sort a collection of integers in ascending order with counting sort."""
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how often each number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
def counting_sort_string(string):
    """Sort the characters of a string via counting sort on their code points."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt"
SCREAMING_SNAKE_CASE_ = input('''Enter numbers separated by a comma:\n''').strip()
SCREAMING_SNAKE_CASE_ = [int(item) for item in user_input.split(''',''')]
print(counting_sort(unsorted))
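# Examples (illustrative):
#     counting_sort([0, 5, 3, 2, 2]) -> [0, 2, 2, 3, 5]
#     counting_sort([-2, -5, -45]) -> [-45, -5, -2]  # the coll_min offset handles negatives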
| 373
|
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16_000) -> np.ndarray:
    """Randomly sample a chunk of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
@dataclass
class _UpperCAmelCase :
__SCREAMING_SNAKE_CASE : Optional[str] = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Name of a dataset from the datasets package"} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "A file containing the training audio paths and labels."} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "A file containing the validation audio paths and labels."} )
__SCREAMING_SNAKE_CASE : str = field(
default="train" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
__SCREAMING_SNAKE_CASE : str = field(
default="validation" , metadata={
"help": (
"The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
)
} , )
__SCREAMING_SNAKE_CASE : str = field(
default="audio" , metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"} , )
__SCREAMING_SNAKE_CASE : str = field(
default="label" , metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"} )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
__SCREAMING_SNAKE_CASE : float = field(
default=2_0 , metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."} , )
@dataclass
class _UpperCAmelCase :
__SCREAMING_SNAKE_CASE : str = field(
default="facebook/wav2vec2-base" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"} )
__SCREAMING_SNAKE_CASE : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Name or path of preprocessor config."} )
__SCREAMING_SNAKE_CASE : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Whether to freeze the feature encoder layers of the model."} )
__SCREAMING_SNAKE_CASE : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Whether to generate an attention mask in the feature extractor."} )
__SCREAMING_SNAKE_CASE : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
__SCREAMING_SNAKE_CASE : Optional[bool] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
__SCREAMING_SNAKE_CASE : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
    def __post_init__(self) -> None:
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                'The argument `--freeze_feature_extractor` is deprecated and '
                'will be removed in a future version. Use `--freeze_feature_encoder` '
                'instead. Setting `freeze_feature_encoder==True`.' , FutureWarning , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'The argument `--freeze_feature_extractor` is deprecated and '
'should not be used in combination with `--freeze_feature_encoder`.'
'Only make use of `--freeze_feature_encoder`.' )
def lowercase__ ( ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_audio_classification' , lowerCAmelCase , lowerCAmelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCAmelCase = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase )
transformers.utils.logging.set_verbosity(lowerCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to train from scratch.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset and prepare it for the audio classification task.
UpperCAmelCase = DatasetDict()
UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
'Make sure to set `--audio_column_name` to the correct audio column - one of '
F"{', '.join(raw_datasets['train'].column_names )}." )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
'Make sure to set `--label_column_name` to the correct text column - one of '
F"{', '.join(raw_datasets['train'].column_names )}." )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
UpperCAmelCase = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
UpperCAmelCase = feature_extractor.model_input_names[0]
def train_transforms(lowerCAmelCase : List[Any] ):
UpperCAmelCase = []
for audio in batch[data_args.audio_column_name]:
UpperCAmelCase = random_subsample(
audio['array'] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(lowerCAmelCase )
UpperCAmelCase = feature_extractor(lowerCAmelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCAmelCase = {model_input_name: inputs.get(lowerCAmelCase )}
UpperCAmelCase = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(lowerCAmelCase : int ):
UpperCAmelCase = [audio['array'] for audio in batch[data_args.audio_column_name]]
UpperCAmelCase = feature_extractor(lowerCAmelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCAmelCase = {model_input_name: inputs.get(lowerCAmelCase )}
UpperCAmelCase = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
UpperCAmelCase = raw_datasets['train'].features[data_args.label_column_name].names
UpperCAmelCase , UpperCAmelCase = {}, {}
for i, label in enumerate(lowerCAmelCase ):
UpperCAmelCase = str(lowerCAmelCase )
UpperCAmelCase = label
# Load the accuracy metric from the datasets package
UpperCAmelCase = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(lowerCAmelCase : int ):
UpperCAmelCase = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=lowerCAmelCase , references=eval_pred.label_ids )
UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(lowerCAmelCase ) , labelaid=lowerCAmelCase , idalabel=lowerCAmelCase , finetuning_task='audio-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCAmelCase = (
raw_datasets['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(lowerCAmelCase , output_all_columns=lowerCAmelCase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCAmelCase = (
raw_datasets['eval'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(lowerCAmelCase , output_all_columns=lowerCAmelCase )
# Initialize our trainer
UpperCAmelCase = Trainer(
model=lowerCAmelCase , args=lowerCAmelCase , train_dataset=raw_datasets['train'] if training_args.do_train else None , eval_dataset=raw_datasets['eval'] if training_args.do_eval else None , compute_metrics=lowerCAmelCase , tokenizer=lowerCAmelCase , )
# Training
if training_args.do_train:
UpperCAmelCase = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase = last_checkpoint
UpperCAmelCase = trainer.train(resume_from_checkpoint=lowerCAmelCase )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCAmelCase = trainer.evaluate()
trainer.log_metrics('eval' , lowerCAmelCase )
trainer.save_metrics('eval' , lowerCAmelCase )
# Write model card and (optionally) push to hub
UpperCAmelCase = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'audio-classification',
'dataset': data_args.dataset_name,
'tags': ['audio-classification'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase )
else:
trainer.create_model_card(**lowerCAmelCase )
if __name__ == "__main__":
main()
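# Typical invocation (illustrative; the model and dataset names are examples,
# and flag handling is assumed to follow HfArgumentParser conventions):
# python run_audio_classification.py \
#     --model_name_or_path facebook/wav2vec2-base \
#     --dataset_name superb --dataset_config_name ks \
#     --output_dir wav2vec2-base-ft-keyword-spotting \
#     --do_train --do_eval --freeze_feature_encoder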
| 373
| 1
|
from ..utils import DummyObject, requires_backends
class lowercase ( metaclass=DummyObject ):
    _backends = ['''flax''', '''transformers''']
def __init__( self : Union[str, Any] , *_lowercase : str , **_lowercase : Optional[int] ):
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def lowercase__ ( cls : List[str] , *_lowercase : Dict , **_lowercase : Optional[int] ):
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def lowercase__ ( cls : List[str] , *_lowercase : Optional[Any] , **_lowercase : Tuple ):
requires_backends(cls , ['''flax''', '''transformers'''] )
class lowercase ( metaclass=DummyObject ):
    _backends = ['''flax''', '''transformers''']
def __init__( self : Any , *_lowercase : Optional[Any] , **_lowercase : Any ):
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def lowercase__ ( cls : List[Any] , *_lowercase : Any , **_lowercase : Any ):
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def lowercase__ ( cls : Dict , *_lowercase : Tuple , **_lowercase : Optional[int] ):
requires_backends(cls , ['''flax''', '''transformers'''] )
class lowercase ( metaclass=DummyObject ):
    _backends = ['''flax''', '''transformers''']
def __init__( self : Dict , *_lowercase : Dict , **_lowercase : Dict ):
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def lowercase__ ( cls : List[str] , *_lowercase : Dict , **_lowercase : List[str] ):
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def lowercase__ ( cls : int , *_lowercase : Dict , **_lowercase : List[str] ):
requires_backends(cls , ['''flax''', '''transformers'''] )
class lowercase ( metaclass=DummyObject ):
    _backends = ['''flax''', '''transformers''']
def __init__( self : Optional[Any] , *_lowercase : str , **_lowercase : List[str] ):
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def lowercase__ ( cls : List[Any] , *_lowercase : List[Any] , **_lowercase : List[str] ):
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def lowercase__ ( cls : str , *_lowercase : Union[str, Any] , **_lowercase : List[Any] ):
requires_backends(cls , ['''flax''', '''transformers'''] )
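# Why this works (illustrative): `DummyObject` is a metaclass whose attribute
# lookups route through `requires_backends`, so importing these placeholder
# classes always succeeds, while instantiating or calling them without both
# `flax` and `transformers` installed raises an ImportError that names the
# missing backends.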
| 250
|
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
a_ :List[Any] = logging.get_logger(__name__)
def normalize_box(box, width, height):
    '''Scale a pixel-space box to the 0-1000 range expected by LayoutLM models.'''
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image, lang=None, tesseract_config=None):
    '''Apply Tesseract OCR on a document image and return recognized words plus normalized bounding boxes.'''
    tesseract_config = tesseract_config if tesseract_config is not None else ''''''
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type='''dict''', config=tesseract_config)
    words, left, top, width, height = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class LayoutLMv2ImageProcessor(BaseImageProcessor):  # name reconstructed; the RGB-to-BGR flip for Detectron2 matches LayoutLMv2
    model_input_names = ['''pixel_values''']
def __init__( self : List[str] , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = PILImageResampling.BILINEAR , _lowercase : bool = True , _lowercase : Optional[str] = None , _lowercase : Optional[str] = "" , **_lowercase : List[str] , ):
super().__init__(**_lowercase )
SCREAMING_SNAKE_CASE__ : Any = size if size is not None else {'''height''': 2_24, '''width''': 2_24}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_size_dict(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size
SCREAMING_SNAKE_CASE__ : Tuple = resample
SCREAMING_SNAKE_CASE__ : Union[str, Any] = apply_ocr
SCREAMING_SNAKE_CASE__ : List[str] = ocr_lang
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tesseract_config
def lowercase__ ( self : Optional[Any] , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : PILImageResampling = PILImageResampling.BILINEAR , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[int] , ):
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ : str = (size['''height'''], size['''width'''])
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : List[str] , _lowercase : ImageInput , _lowercase : bool = None , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : Optional[str] = None , _lowercase : Optional[str] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : ChannelDimension = ChannelDimension.FIRST , **_lowercase : List[Any] , ):
SCREAMING_SNAKE_CASE__ : Tuple = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : Any = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : str = get_size_dict(_lowercase )
SCREAMING_SNAKE_CASE__ : int = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : List[Any] = apply_ocr if apply_ocr is not None else self.apply_ocr
SCREAMING_SNAKE_CASE__ : Any = ocr_lang if ocr_lang is not None else self.ocr_lang
SCREAMING_SNAKE_CASE__ : List[Any] = tesseract_config if tesseract_config is not None else self.tesseract_config
SCREAMING_SNAKE_CASE__ : str = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_numpy_array(_lowercase ) for image in images]
if apply_ocr:
requires_backends(self , '''pytesseract''' )
SCREAMING_SNAKE_CASE__ : int = []
SCREAMING_SNAKE_CASE__ : Any = []
for image in images:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = apply_tesseract(_lowercase , _lowercase , _lowercase )
words_batch.append(_lowercase )
boxes_batch.append(_lowercase )
if do_resize:
SCREAMING_SNAKE_CASE__ : List[Any] = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
SCREAMING_SNAKE_CASE__ : List[str] = [flip_channel_order(_lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ : List[Any] = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ : str = BatchFeature(data={'''pixel_values''': images} , tensor_type=_lowercase )
if apply_ocr:
SCREAMING_SNAKE_CASE__ : List[str] = words_batch
SCREAMING_SNAKE_CASE__ : List[str] = boxes_batch
return data
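# Usage sketch: this appears to correspond to transformers'
# LayoutLMv2ImageProcessor (OCR additionally requires the tesseract binary);
# treat the input file name as a hypothetical example.
if __name__ == "__main__":
    from PIL import Image
    processor = LayoutLMv2ImageProcessor(apply_ocr=True)
    image = Image.open("document.png").convert("RGB")  # hypothetical input file
    encoding = processor(image, return_tensors="pt")
    print(encoding.pixel_values.shape)  # torch.Size([1, 3, 224, 224])
    print(encoding.words, encoding.boxes)  # OCR words + 0-1000 normalized boxes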
| 250
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__ = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
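# Effect of the `_LazyModule` pattern above (illustrative): the import table
# only names symbols, and the heavy submodules are imported on first attribute
# access, keeping `import transformers` cheap, e.g.
# from transformers import SqueezeBertConfig  # triggers the actual import
# config = SqueezeBertConfig(hidden_size=768)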
| 83
|
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __snake_case ( unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        """Check google/mt5-small against a known reference cross-entropy score."""
        model = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''')
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''')
        input_ids = tokenizer('''Hello there''', return_tensors='''np''').input_ids
        labels = tokenizer('''Hi I am''', return_tensors='''np''').input_ids
        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)
        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 83
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {}
class LlamaConfig(PretrainedConfig):  # name reconstructed; matches transformers' LlamaConfig
    """Configuration class for LLaMA-style models."""
    model_type = '''llama'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration dictionary."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
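# Example (illustrative): rope_scaling dictionaries that the validator above
# accepts, extending the usable context via RoPE scaling.
if __name__ == "__main__":
    config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
    print(config.rope_scaling)
    config = LlamaConfig(rope_scaling={"type": "dynamic", "factor": 4.0})
    print(config.rope_scaling)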
| 720
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):  # name reconstructed from the file contents
    """Configuration class for UniSpeech models."""
    model_type = "unispeech"
def __init__( self : str , snake_case : Union[str, Any]=32 , snake_case : Optional[Any]=768 , snake_case : Dict=12 , snake_case : Tuple=12 , snake_case : Optional[Any]=3072 , snake_case : Any="gelu" , snake_case : Dict=0.1 , snake_case : Tuple=0.1 , snake_case : str=0.1 , snake_case : Optional[int]=0.0 , snake_case : Any=0.0 , snake_case : Optional[Any]=0.1 , snake_case : List[Any]=0.1 , snake_case : Optional[int]=0.02 , snake_case : List[str]=1E-5 , snake_case : str="group" , snake_case : List[Any]="gelu" , snake_case : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , snake_case : List[Any]=(5, 2, 2, 2, 2, 2, 2) , snake_case : Union[str, Any]=(10, 3, 3, 3, 3, 2, 2) , snake_case : Tuple=False , snake_case : Optional[int]=128 , snake_case : List[str]=16 , snake_case : List[str]=False , snake_case : Dict=True , snake_case : Optional[Any]=0.05 , snake_case : Optional[Any]=10 , snake_case : Union[str, Any]=2 , snake_case : List[str]=0.0 , snake_case : str=10 , snake_case : int=0 , snake_case : Tuple=320 , snake_case : Any=2 , snake_case : List[str]=0.1 , snake_case : Optional[Any]=100 , snake_case : List[Any]=256 , snake_case : Union[str, Any]=256 , snake_case : Any=0.1 , snake_case : str="mean" , snake_case : Union[str, Any]=False , snake_case : str=False , snake_case : Union[str, Any]=256 , snake_case : Optional[Any]=80 , snake_case : str=0 , snake_case : int=1 , snake_case : int=2 , snake_case : Dict=0.5 , **snake_case : Optional[int] , ):
super().__init__(**snake_case , pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case )
__UpperCamelCase = hidden_size
__UpperCamelCase = feat_extract_norm
__UpperCamelCase = feat_extract_activation
__UpperCamelCase = list(snake_case )
__UpperCamelCase = list(snake_case )
__UpperCamelCase = list(snake_case )
__UpperCamelCase = conv_bias
__UpperCamelCase = num_conv_pos_embeddings
__UpperCamelCase = num_conv_pos_embedding_groups
__UpperCamelCase = len(self.conv_dim )
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_dropout
__UpperCamelCase = attention_dropout
__UpperCamelCase = activation_dropout
__UpperCamelCase = feat_proj_dropout
__UpperCamelCase = final_dropout
__UpperCamelCase = layerdrop
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = initializer_range
__UpperCamelCase = num_ctc_classes
__UpperCamelCase = vocab_size
__UpperCamelCase = do_stable_layer_norm
__UpperCamelCase = use_weighted_layer_sum
__UpperCamelCase = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCamelCase = apply_spec_augment
__UpperCamelCase = mask_time_prob
__UpperCamelCase = mask_time_length
__UpperCamelCase = mask_time_min_masks
__UpperCamelCase = mask_feature_prob
__UpperCamelCase = mask_feature_length
__UpperCamelCase = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__UpperCamelCase = num_codevectors_per_group
__UpperCamelCase = num_codevector_groups
__UpperCamelCase = contrastive_logits_temperature
__UpperCamelCase = feat_quantizer_dropout
__UpperCamelCase = num_negatives
__UpperCamelCase = codevector_dim
__UpperCamelCase = proj_codevector_dim
__UpperCamelCase = diversity_loss_weight
# ctc loss
__UpperCamelCase = ctc_loss_reduction
__UpperCamelCase = ctc_zero_infinity
# pretraining loss
__UpperCamelCase = replace_prob
@property
def snake_case ( self : Dict ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
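    # Example (illustrative): with the default conv_stride (5, 2, 2, 2, 2, 2, 2)
    # the property above evaluates to 5 * 2**6 == 320, i.e. the feature encoder
    # emits one frame per 320 input samples (20 ms of audio at 16 kHz):
    #     functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320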
| 375
| 0
|
def nand_gate(input_1: int, input_2: int) -> int:
    """Output 0 only when both inputs are 1 (logical NOT-AND)."""
    return int((input_1, input_2).count(0) != 0)
def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 21
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = '▁'
lowerCAmelCase = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
lowerCAmelCase = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
lowerCAmelCase = {'vinai/bartpho-syllable': 1024}
class BartphoTokenizer(PreTrainedTokenizer):  # name reconstructed; matches transformers' BartphoTokenizer
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
def __init__( self: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any]="<s>" , UpperCamelCase_: List[Any]="</s>" , UpperCamelCase_: Optional[int]="</s>" , UpperCamelCase_: List[str]="<s>" , UpperCamelCase_: Optional[int]="<unk>" , UpperCamelCase_: Optional[int]="<pad>" , UpperCamelCase_: Optional[int]="<mask>" , UpperCamelCase_: Optional[Dict[str, Any]] = None , **UpperCamelCase_: int , ) -> None:
"""simple docstring"""
lowercase__ = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
lowercase__ = vocab_file
lowercase__ = monolingual_vocab_file
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase_ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
lowercase__ = {}
lowercase__ = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(UpperCamelCase_ ) not in self.fairseq_tokens_to_ids:
lowercase__ = cnt
cnt += 1
with open(UpperCamelCase_ , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
lowercase__ = line.strip().split()[0]
lowercase__ = len(self.fairseq_tokens_to_ids )
if str(UpperCamelCase_ ) not in self.fairseq_tokens_to_ids:
lowercase__ = len(self.fairseq_tokens_to_ids )
lowercase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self: Tuple ) -> int:
"""simple docstring"""
lowercase__ = self.__dict__.copy()
lowercase__ = None
lowercase__ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self: List[str] , UpperCamelCase_: int ) -> List[Any]:
"""simple docstring"""
lowercase__ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ = [self.cls_token_id]
lowercase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    @property
    def vocab_size( self ) -> int:
        """simple docstring"""
        return len(self.fairseq_ids_to_tokens )
    def get_vocab( self ) -> dict:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text: str ) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id
    def _convert_id_to_token( self , index ):
        """simple docstring"""
        return self.fairseq_ids_to_tokens[index]
    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        # join the pieces and strip the SentencePiece underline marker
        out_string = ''''''.join(tokens ).replace('''▁''' , ''' ''' ).strip()
        return out_string
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        out_monolingual_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
            out_monolingual_vocab_file ) and os.path.isfile(self.monolingual_vocab_file ):
            copyfile(self.monolingual_vocab_file , out_monolingual_vocab_file )
        elif not os.path.isfile(self.monolingual_vocab_file ):
            with open(out_monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f'{str(token )} \n' )
        return out_vocab_file, out_monolingual_vocab_file
| 43
| 0
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)
DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "dpt"
    def __init__( self , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=3_8_4 , patch_size=1_6 , num_channels=3 , is_hybrid=False , qkv_bias=True , backbone_out_indices=[2, 5, 8, 1_1] , readout_type="project" , reassemble_factors=[4, 2, 1, 0.5] , neck_hidden_sizes=[9_6, 1_9_2, 3_8_4, 7_6_8] , fusion_hidden_size=2_5_6 , head_in_index=-1 , use_batch_norm_in_fusion_residual=False , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , semantic_loss_ignore_index=2_5_5 , semantic_classifier_dropout=0.1 , backbone_featmap_shape=[1, 1_0_2_4, 2_4, 2_4] , neck_ignore_stages=[0, 1] , backbone_config=None , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid
        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone." )
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                self.backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , dict ):
                logger.info("Initializing the config with a `BiT` backbone." )
                self.backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , PretrainedConfig ):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    F"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.""" )
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages
            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode." )
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("readout_type must be one of ['ignore', 'add', 'project']" )
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
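# usage sketch: DPTConfig(is_hybrid=True) falls back to the default BiT backbone dict
# defined above, and any readout_type other than "project" raises in hybrid mode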
| 713
|
"""simple docstring"""
from __future__ import annotations
def is_palindrome(n):
    '''simple docstring'''
    n = str(n)
    return n == n[::-1]
def solution(limit = 100_0000):
    '''simple docstring'''
    total = 0
    for i in range(1 , limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b" )[1] ):
            total += i
    return total
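# e.g. 585 qualifies for the sum above: both 585 and bin(585) = '0b1001001001'
# (read after the 'b') are palindromes, so solution() counts it.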
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 553
| 0
|
"""simple docstring"""
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = 'Create a default config file for Accelerate with only a few flags set.'
def write_basic_config(mixed_precision="no" , save_location: str = default_json_config_file , use_xpu: bool = False ):
    '''simple docstring'''
    path = Path(save_location )
    path.parent.mkdir(parents=True , exist_ok=True )
    if path.exists():
        print(
            f'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f'''`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}''' )
    config = {
        """compute_environment""": """LOCAL_MACHINE""",
        """mixed_precision""": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = """MULTI_GPU"""
        else:
            config["distributed_type"] = """NO"""
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = """MULTI_XPU"""
        else:
            config["distributed_type"] = """NO"""
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = """MULTI_NPU"""
        else:
            config["distributed_type"] = """NO"""
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = """NO"""
    config = ClusterConfig(**config )
    config.to_json_file(path )
    return path
def default_command_parser(parser , parents ):
    '''simple docstring'''
    parser = parser.add_parser("""default""" , parents=parents , help=description , formatter_class=SubcommandHelpFormatter )
    parser.add_argument(
        """--config_file""" , default=default_json_config_file , help=(
            """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
            """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
            """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
            """with 'huggingface'."""
        ) , dest="""save_location""" , )
    parser.add_argument(
        """--mixed_precision""" , choices=["""no""", """fp16""", """bf16"""] , type=str , help="""Whether or not to use mixed precision training. """
        """Choose between FP16 and BF16 (bfloat16) training. """
        """BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.""" , default="""no""" , )
    parser.set_defaults(func=default_config_command )
    return parser
def default_config_command(args ):
    '''simple docstring'''
    config_file = write_basic_config(args.mixed_precision , args.save_location )
    if config_file:
        print(f'''accelerate configuration saved at {config_file}''' )
| 169
|
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
__magic_name__ : List[Any] =logging.getLogger(__name__)
__magic_name__ : int ='Hello world! cécé herlolip'
BertAbsConfig =namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def convert_bertabs_checkpoints( path_to_checkpoints , dump_path ):
'''simple docstring'''
__magic_name__ = BertAbsConfig(
temp_dir="." , finetune_bert=lowerCamelCase_ , large=lowerCamelCase_ , share_emb=lowerCamelCase_ , use_bert_emb=lowerCamelCase_ , encoder="bert" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
    __magic_name__ = torch.load(path_to_checkpoints , lambda storage , loc : storage )
__magic_name__ = AbsSummarizer(lowerCamelCase_ , torch.device("cpu" ) , lowerCamelCase_ )
original.eval()
__magic_name__ = BertAbsSummarizer(lowerCamelCase_ , torch.device("cpu" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical" )
__magic_name__ = BertTokenizer.from_pretrained("bert-base-uncased" )
# prepare the model inputs
__magic_name__ = tokenizer.encode("This is sample éàalj'-." )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(lowerCamelCase_ )) )
__magic_name__ = torch.tensor(lowerCamelCase_ ).unsqueeze(0 )
__magic_name__ = tokenizer.encode("This is sample 3 éàalj'-." )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(lowerCamelCase_ )) )
__magic_name__ = torch.tensor(lowerCamelCase_ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__magic_name__ = encoder_input_ids
__magic_name__ = decoder_input_ids
__magic_name__ = __magic_name__ = None
__magic_name__ = None
__magic_name__ = __magic_name__ = None
__magic_name__ = __magic_name__ = None
__magic_name__ = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__magic_name__ = original(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0]
__magic_name__ = original.generator(lowerCamelCase_ )
__magic_name__ = new_model(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0]
__magic_name__ = new_model.generator(lowerCamelCase_ )
__magic_name__ = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(lowerCamelCase_ ) )
__magic_name__ = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(lowerCamelCase_ ) )
__magic_name__ = torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 )
if are_identical:
logging.info("all weights are equal up to 1e-3" )
else:
raise ValueError("the weights are different. The new model is likely different from the original one." )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary" )
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" )
if __name__ == "__main__":
    parser =argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
    args =parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 664
| 0
|
'''simple docstring'''
from collections import defaultdict
class AssignmentUsingBitmask:
    '''simple docstring'''
    def __init__( self , task_performed , total ):
        '''simple docstring'''
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1 )] for j in range(2 ** len(task_performed ) )
        ]
        self.task = defaultdict(list )  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed )) - 1
    def count_ways_until( self , mask , task_no ):
        '''simple docstring'''
        # if the mask covers every person, all of them have been assigned a task
        if mask == self.final_mask:
            return 1
        # if not everyone gets a task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # number of ways when we don't assign this task to anyone
        total_ways_util = self.count_ways_until(mask , task_no + 1 )
        # now assign the task one by one to all possible persons and recursively
        # count arrangements for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]
    def count_no_of_ways( self , task_performed ):
        '''simple docstring'''
        for i in range(len(task_performed ) ):
            for j in task_performed[i]:
                self.task[j].append(i )
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0 , 1 )
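# Worked example for the driver below: M = 3 persons with doable task sets
# {1, 3, 4}, {1, 2, 5} and {3, 4} give final_mask = 0b111, and the recursion counts
# the assignments in which every person gets a distinct task; here that count is 10.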
if __name__ == "__main__":
_A : Dict =5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
_A : List[str] =[[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
| 4
|
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story( story_id: int ) -> dict:
    url = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
    return requests.get(url ).json()
def hackernews_top_stories( max_stories: int = 10 ) -> list[dict]:
    url = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
    story_ids = requests.get(url ).json()[:max_stories]
    return [get_hackernews_story(story_id ) for story_id in story_ids]
def hackernews_top_stories_as_markdown( max_stories: int = 10 ) -> str:
    stories = hackernews_top_stories(max_stories )
    return "\n".join('* [{title}]({url})'.format(**story ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 4
| 1
|
'''simple docstring'''
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2_048
MAX_LENGTH = 4_096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example ):
    """simple docstring"""
    def choose_first(answer , is_long_answer=False ):
        assert isinstance(answer , list )
        if len(answer ) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a['start_token'] ) > 0:
                break
        return a
    answer = {'id': example['id']}
    annotation = example['annotations']
    yes_no_answer = annotation['yes_no_answer']
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer['category'] = ['yes'] if 1 in yes_no_answer else ['no']
        answer['start_token'] = answer['end_token'] = []
        answer['start_byte'] = answer['end_byte'] = []
        answer['text'] = ['<cls>']
    else:
        answer['category'] = ['short']
        out = choose_first(annotation['short_answers'] )
        if len(out['start_token'] ) == 0:
            # answer will be long if short is not available
            answer['category'] = ['long']
            out = choose_first(annotation['long_answer'] , is_long_answer=True )
            out['text'] = []
        answer.update(out )
    # disregard some samples
    if len(answer['start_token'] ) > 1 or answer["start_token"] == answer["end_token"]:
        answer['remove_it'] = True
    else:
        answer['remove_it'] = False
    cols = ['start_token', 'end_token', 'start_byte', 'end_byte', 'text']
    if not all(isinstance(answer[k] , list ) for k in cols ):
raise ValueError('Issue in ID' , example['id'] )
return answer
def get_context_and_ans(example , assertion=False ):
    """simple docstring"""
    answer = _get_single_answer(example )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
        doc = example['document']['tokens']
        context = []
        for i in range(len(doc['token'] ) ):
            if not doc["is_html"][i]:
                context.append(doc['token'][i] )
        return {
            "context": " ".join(context ),
"answer": {
"start_token": -1_0_0, # ignore index in cross-entropy
"end_token": -1_0_0, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
    # later, this helps in removing all no-answer samples
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
    cols = ['start_token', 'end_token']
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
    doc = example['document']['tokens']
    start_token = answer['start_token']
    end_token = answer['end_token']
    context = []
    for i in range(len(doc['token'] ) ):
        if not doc["is_html"][i]:
            context.append(doc['token'][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
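    # every HTML token that precedes the answer shifts the plain-text indices left by
    # one, so the decrements above keep start/end aligned with the rebuilt `context`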
    new = ' '.join(context[start_token:end_token] )
# checking above code
if assertion:
        is_html = doc['is_html'][answer['start_token'] : answer['end_token']]
        old = doc['token'][answer['start_token'] : answer['end_token']]
        old = ' '.join([old[i] for i in range(len(old ) ) if not is_html[i]] )
if new != old:
print('ID:' , example['id'] )
            print('New:' , new , end='\n' )
            print('Old:' , old , end='\n\n' )
return {
"context": " ".join(lowercase__ ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def get_strided_contexts_and_ans(example , tokenizer , doc_stride=2_0_4_8 , max_length=4_0_9_6 , assertion=True ):
    """simple docstring"""
# overlap will be of doc_stride - q_len
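    # e.g. with the module defaults (MAX_LENGTH=4096, DOC_STRIDE=2048) and a question
    # of, say, 50 tokens, every window holds 4046 context tokens and consecutive
    # windows share doc_stride - q_len = 1998 of them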
    out = get_context_and_ans(example , assertion=assertion )
    answer = out['answer']
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
    input_ids = tokenizer(example['question']['text'] , out['context'] ).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len , len(input_ids ) , max_length - doc_stride )
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer['category'][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-1_0_0] * len(lowercase__ ),
"end_token": [-1_0_0] * len(lowercase__ ),
"category": category,
},
}
    splitted_context = out['context'].split()
    complete_end_token = splitted_context[answer['end_token']]
    answer['start_token'] = len(
        tokenizer(
            ' '.join(splitted_context[: answer['start_token']] ) , add_special_tokens=False , ).input_ids )
    answer['end_token'] = len(
        tokenizer(' '.join(splitted_context[: answer['end_token']] ) , add_special_tokens=False ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token , add_special_tokens=False ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
    old = input_ids[answer['start_token'] : answer['end_token'] + 1]  # right & left are inclusive
    start_token = answer['start_token']
    end_token = answer['end_token']
    if assertion:
        new = tokenizer.decode(old )
if answer["span"] != new:
print('ISSUE IN TOKENIZATION' )
print('OLD:' , answer['span'] )
            print('NEW:' , new , end='\n\n' )
    if len(input_ids ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len , len(input_ids ) , max_length - doc_stride )
    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer['category'][0] )  # ["short"] -> "short"
        else:
            start_token = -1_0_0
            end_token = -1_0_0
answers_category.append('null' )
        new = inputs[-1][start_token : end_token + 1]
        answers_start_token.append(start_token )
        answers_end_token.append(end_token )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print('ISSUE in strided for ID:' , example['id'] )
                print('New:' , tokenizer.decode(new ) )
                print('Old:' , tokenizer.decode(old ) , end='\n\n' )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def prepare_inputs(example , tokenizer , doc_stride=2_0_4_8 , max_length=4_0_9_6 , assertion=False ):
    """simple docstring"""
    example = get_strided_contexts_and_ans(
        example , tokenizer , doc_stride=doc_stride , max_length=max_length , assertion=assertion , )
    return example
def save_to_disk(hf_data , file_name ):
    """simple docstring"""
    with jsonlines.open(file_name , 'a' ) as writer:
        for example in tqdm(hf_data , total=len(hf_data ) , desc='Saving samples ... ' ):
            labels = example['labels']
for ids, start, end, cat in zip(
example['input_ids'] , labels['start_token'] , labels['end_token'] , labels['category'] , ):
                if start == -1 and end == -1:
                    continue  # skip samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # randomly drop ~60% of the samples with a null answer
writer.write(
{
'input_ids': ids,
'start_token': start,
'end_token': end,
'category': CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
_a : List[str] = load_dataset("natural_questions")
_a : Dict = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
_a : Any = data["train" if PROCESS_TRAIN == "true" else "validation"]
_a : Union[str, Any] = {
"tokenizer": tokenizer,
"doc_stride": DOC_STRIDE,
"max_length": MAX_LENGTH,
"assertion": False,
}
_a : List[Any] = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
_a : List[str] = data.remove_columns(["annotations", "document", "id", "question"])
print(data)
np.random.seed(SEED)
_a : Union[str, Any] = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
save_to_disk(data, file_name=cache_file_name)
| 56
|
lowerCamelCase_ = """0.18.2"""
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
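# the try/except/else pattern above repeats for every optional dependency below: when
# an extra is missing, dummy placeholder objects are imported in its place so that
# `import diffusers` itself never fails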
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 318
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase : Dict = logging.get_logger(__name__)
class snake_case__ ( BaseImageProcessor ):
    model_input_names = ["""pixel_values"""]
def __init__( self : Optional[int] , _lowerCamelCase : bool = True , _lowerCamelCase : Dict[str, int] = None , _lowerCamelCase : int = 0.9 , _lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC , _lowerCamelCase : bool = True , _lowerCamelCase : Dict[str, int] = None , _lowerCamelCase : Union[int, float] = 1 / 2_5_5 , _lowerCamelCase : bool = True , _lowerCamelCase : bool = True , _lowerCamelCase : Optional[Union[float, List[float]]] = None , _lowerCamelCase : Optional[Union[float, List[float]]] = None , **_lowerCamelCase : Union[str, Any] , ):
super().__init__(**__lowercase )
snake_case__ : int = size if size is not None else {'shortest_edge': 2_2_4}
snake_case__ : List[str] = get_size_dict(__lowercase , default_to_square=__lowercase )
snake_case__ : Optional[Any] = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
snake_case__ : str = get_size_dict(__lowercase , param_name='crop_size' )
snake_case__ : str = do_resize
snake_case__ : str = size
snake_case__ : Optional[int] = crop_pct
snake_case__ : List[str] = resample
snake_case__ : List[str] = do_center_crop
snake_case__ : int = crop_size
snake_case__ : List[Any] = do_rescale
snake_case__ : Dict = rescale_factor
snake_case__ : Optional[int] = do_normalize
snake_case__ : Any = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
snake_case__ : Dict = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCAmelCase__ ( self : Dict , _lowerCamelCase : np.ndarray , _lowerCamelCase : Dict[str, int] , _lowerCamelCase : Optional[float] = None , _lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC , _lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCamelCase : List[str] , ):
snake_case__ : Tuple = get_size_dict(__lowercase , default_to_square=__lowercase )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(F'''size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
if crop_pct is not None:
if "shortest_edge" in size:
snake_case__ : Optional[int] = int(size['shortest_edge'] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
snake_case__ : Dict = int(size['height'] / crop_pct )
else:
snake_case__ : Any = (int(size['height'] / crop_pct ), int(size['width'] / crop_pct ))
else:
raise ValueError('Invalid size for resize: {}'.format(__lowercase ) )
snake_case__ : List[str] = get_resize_output_image_size(__lowercase , size=__lowercase , default_to_square=__lowercase )
else:
if "shortest_edge" in size:
snake_case__ : Union[str, Any] = get_resize_output_image_size(__lowercase , size=size['shortest_edge'] , default_to_square=__lowercase )
elif "height" in size and "width" in size:
snake_case__ : List[str] = (size['height'], size['width'])
else:
raise ValueError('Invalid size for resize: {}'.format(__lowercase ) )
return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
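    # crop_pct follows the timm-style evaluation resize: the image is first resized to
    # size / crop_pct and only afterwards center-cropped back down to `size`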
def UpperCAmelCase__ ( self : Any , _lowerCamelCase : np.ndarray , _lowerCamelCase : Dict[str, int] , _lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCamelCase : Any , ):
snake_case__ : Optional[int] = get_size_dict(__lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'''size must contain \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(__lowercase , size=(size['height'], size['width']) , data_format=__lowercase , **__lowercase )
def UpperCAmelCase__ ( self : Optional[int] , _lowerCamelCase : np.ndarray , _lowerCamelCase : Union[int, float] , _lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCamelCase : List[str] , ):
return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase )
def UpperCAmelCase__ ( self : str , _lowerCamelCase : np.ndarray , _lowerCamelCase : Union[float, List[float]] , _lowerCamelCase : Union[float, List[float]] , _lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCamelCase : Tuple , ):
return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase )
def UpperCAmelCase__ ( self : List[str] , _lowerCamelCase : ImageInput , _lowerCamelCase : bool = None , _lowerCamelCase : Dict[str, int] = None , _lowerCamelCase : int = None , _lowerCamelCase : PILImageResampling = None , _lowerCamelCase : bool = None , _lowerCamelCase : Dict[str, int] = None , _lowerCamelCase : bool = None , _lowerCamelCase : float = None , _lowerCamelCase : bool = None , _lowerCamelCase : Optional[Union[float, List[float]]] = None , _lowerCamelCase : Optional[Union[float, List[float]]] = None , _lowerCamelCase : Optional[Union[str, TensorType]] = None , _lowerCamelCase : ChannelDimension = ChannelDimension.FIRST , **_lowerCamelCase : Tuple , ):
snake_case__ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
snake_case__ : List[Any] = crop_pct if crop_pct is not None else self.crop_pct
snake_case__ : Optional[Any] = resample if resample is not None else self.resample
snake_case__ : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case__ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
snake_case__ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case__ : str = do_normalize if do_normalize is not None else self.do_normalize
snake_case__ : Optional[Any] = image_mean if image_mean is not None else self.image_mean
snake_case__ : Union[str, Any] = image_std if image_std is not None else self.image_std
snake_case__ : int = size if size is not None else self.size
snake_case__ : Optional[int] = get_size_dict(__lowercase , default_to_square=__lowercase )
snake_case__ : Any = crop_size if crop_size is not None else self.crop_size
snake_case__ : Optional[int] = get_size_dict(__lowercase , param_name='crop_size' )
snake_case__ : Union[str, Any] = make_list_of_images(__lowercase )
if not valid_images(__lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_pct is None:
raise ValueError('Crop_pct must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
snake_case__ : int = [to_numpy_array(__lowercase ) for image in images]
if do_resize:
snake_case__ : Optional[int] = [self.resize(image=__lowercase , size=__lowercase , crop_pct=__lowercase , resample=__lowercase ) for image in images]
if do_center_crop:
snake_case__ : List[str] = [self.center_crop(image=__lowercase , size=__lowercase ) for image in images]
if do_rescale:
snake_case__ : str = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images]
if do_normalize:
snake_case__ : Optional[Any] = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images]
snake_case__ : List[str] = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
snake_case__ : Dict = {'pixel_values': images}
return BatchFeature(data=__lowercase , tensor_type=__lowercase )
| 715
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester :
def __init__( self : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any]=2 , _lowerCamelCase : Dict=True , _lowerCamelCase : Optional[Any]=False , _lowerCamelCase : int=1_0 , _lowerCamelCase : Dict=3 , _lowerCamelCase : List[str]=3_2 * 8 , _lowerCamelCase : Tuple=3_2 * 8 , _lowerCamelCase : Optional[int]=4 , _lowerCamelCase : Optional[Any]=6_4 , ):
snake_case__ : Dict = parent
snake_case__ : Optional[Any] = batch_size
snake_case__ : str = is_training
snake_case__ : List[str] = use_auxiliary_loss
snake_case__ : Union[str, Any] = num_queries
snake_case__ : List[Any] = num_channels
snake_case__ : Dict = min_size
snake_case__ : str = max_size
snake_case__ : Any = num_labels
snake_case__ : int = hidden_dim
snake_case__ : List[Any] = hidden_dim
def UpperCAmelCase__ ( self : str ):
snake_case__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_lowerCamelCase )
snake_case__ : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_lowerCamelCase )
snake_case__ : Optional[Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_lowerCamelCase ) > 0.5
).float()
snake_case__ : str = (torch.rand((self.batch_size, self.num_labels) , device=_lowerCamelCase ) > 0.5).long()
snake_case__ : int = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def UpperCAmelCase__ ( self : Optional[int] ):
snake_case__ : Optional[Any] = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
snake_case__ : Optional[Any] = self.num_queries
snake_case__ : int = self.num_labels
snake_case__ : Any = [1, 1, 1, 1]
snake_case__ : str = self.num_channels
snake_case__ : List[str] = 6_4
snake_case__ : Optional[int] = 1_2_8
snake_case__ : Optional[int] = self.hidden_dim
snake_case__ : Optional[int] = self.hidden_dim
snake_case__ : Union[str, Any] = self.hidden_dim
return config
def UpperCAmelCase__ ( self : str ):
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ : Dict = self.prepare_config_and_inputs()
snake_case__ : Tuple = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def UpperCAmelCase__ ( self : Optional[int] , _lowerCamelCase : Tuple , _lowerCamelCase : Dict ):
snake_case__ : Optional[Any] = output.encoder_hidden_states
snake_case__ : Optional[int] = output.pixel_decoder_hidden_states
snake_case__ : Union[str, Any] = output.transformer_decoder_hidden_states
        self.parent.assertEqual(len(_lowerCamelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(_lowerCamelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(_lowerCamelCase ) , config.decoder_layers )
def UpperCAmelCase__ ( self : List[str] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : int , _lowerCamelCase : List[str]=False ):
with torch.no_grad():
snake_case__ : Union[str, Any] = MaskaFormerModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
snake_case__ : List[Any] = model(pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase )
snake_case__ : Optional[Any] = model(_lowerCamelCase , output_hidden_states=_lowerCamelCase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase__ ( self : List[str] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any] ):
snake_case__ : int = MaskaFormerForUniversalSegmentation(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
def comm_check_on_output(_lowerCamelCase : int ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
snake_case__ : Optional[Any] = model(pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase )
snake_case__ : Any = model(_lowerCamelCase )
comm_check_on_output(_lowerCamelCase )
snake_case__ : List[Any] = model(
pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase )
comm_check_on_output(_lowerCamelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskaFormerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
def UpperCAmelCase__ ( self : Tuple ):
snake_case__ : Optional[Any] = MaskaFormerModelTester(self )
snake_case__ : Optional[int] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def UpperCAmelCase__ ( self : Tuple ):
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Dict ):
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowerCamelCase , **_lowerCamelCase , output_hidden_states=_lowerCamelCase )
def UpperCAmelCase__ ( self : str ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_lowerCamelCase )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def UpperCAmelCase__ ( self : Union[str, Any] ):
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def UpperCAmelCase__ ( self : Optional[int] ):
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def UpperCAmelCase__ ( self : List[Any] ):
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def UpperCAmelCase__ ( self : Optional[Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCAmelCase__ ( self : Optional[Any] ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCAmelCase__ ( self : Optional[int] ):
pass
def UpperCAmelCase__ ( self : int ):
snake_case__ , snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : List[str] = model_class(_lowerCamelCase )
snake_case__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Dict = [*signature.parameters.keys()]
snake_case__ : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
@slow
def UpperCAmelCase__ ( self : Tuple ):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
snake_case__ : Tuple = MaskaFormerModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
snake_case__ : int = (self.model_tester.min_size,) * 2
snake_case__ : Tuple = {
'pixel_values': torch.randn((2, 3, *size) , device=_lowerCamelCase ),
'mask_labels': torch.randn((2, 1_0, *size) , device=_lowerCamelCase ),
'class_labels': torch.zeros(2 , 1_0 , device=_lowerCamelCase ).long(),
}
snake_case__ : str = self.model_tester.get_config()
snake_case__ : Optional[int] = MaskaFormerForUniversalSegmentation(_lowerCamelCase ).to(_lowerCamelCase )
snake_case__ : Tuple = model(**_lowerCamelCase )
self.assertTrue(outputs.loss is not None )
def UpperCAmelCase__ ( self : str ):
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowerCamelCase , **_lowerCamelCase , output_hidden_states=_lowerCamelCase )
def UpperCAmelCase__ ( self : Tuple ):
snake_case__ , snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Dict = model_class(_lowerCamelCase ).to(_lowerCamelCase )
snake_case__ : List[str] = model(**_lowerCamelCase , output_attentions=_lowerCamelCase )
self.assertTrue(outputs.attentions is not None )
def UpperCAmelCase__ ( self : Any ):
if not self.model_tester.is_training:
return
snake_case__ : Tuple = self.all_model_classes[1]
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs()
snake_case__ : List[Any] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
snake_case__ : int = model(_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase ).loss
loss.backward()
def UpperCAmelCase__ ( self : Any ):
snake_case__ : int = self.all_model_classes[1]
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs()
snake_case__ : Union[str, Any] = True
snake_case__ : Optional[Any] = True
snake_case__ : Tuple = model_class(_lowerCamelCase ).to(_lowerCamelCase )
model.train()
snake_case__ : Union[str, Any] = model(_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase )
snake_case__ : Tuple = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
snake_case__ : Any = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
snake_case__ : Dict = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
snake_case__ : List[Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_lowerCamelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
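        # retain_grad() above keeps gradients on these non-leaf activations, so the
        # assertions confirm that backprop actually reaches every stage of the model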
lowerCamelCase : int = 1e-4
def lowercase__( ):
snake_case__ : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))
        with torch.no_grad():
            outputs = model(**inputs)
        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE)
        )
        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE)
        )
        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE)
        )
    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))
        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )
        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]
        with torch.no_grad():
            outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
| 303
| 0
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
    return train_ds, val_ds, test_ds, label2id
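# A minimal usage sketch of get_tfds (the CSV paths and the label sitting in
# column 0 are assumptions for illustration only):
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#   train_ds, val_ds, test_ds, label2id = get_tfds(
#       train_file="train.csv", eval_file="dev.csv", test_file="test.csv",
#       tokenizer=tokenizer, label_column_id=0, max_seq_length=128,
#   )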
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: Optional[str] = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )
    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
        results.update(result)
    return results
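# Example CLI invocation (the script filename, CSV names and label column are
# assumptions; the flags mirror the dataclass fields defined above):
#
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-cased \
#       --train_file train.csv --dev_file dev.csv --test_file test.csv \
#       --label_column_id 0 --output_dir ./out --do_train --do_eval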
if __name__ == "__main__":
main()
| 193
|
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )
        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)
        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_small_model_tf(self):
        pass
    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )
        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.7_235, 'label': 'cat', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}},
{'score': 0.7_218, 'label': 'remote', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}},
{'score': 0.7_184, 'label': 'couch', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}},
{'score': 0.6_748, 'label': 'remote', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}},
{'score': 0.6_656, 'label': 'cat', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}},
{'score': 0.6_614, 'label': 'couch', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}},
{'score': 0.6_456, 'label': 'remote', 'box': {'xmin': 4_9_4, 'ymin': 1_0_5, 'xmax': 5_2_1, 'ymax': 1_2_7}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 6_7, 'ymin': 2_7_4, 'xmax': 9_3, 'ymax': 2_9_7}},
{'score': 0.6_419, 'label': 'cat', 'box': {'xmin': 4_9_4, 'ymin': 1_0_5, 'xmax': 5_2_1, 'ymax': 1_2_7}},
] , )
        outputs = object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
] , threshold=0.64 , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'score': 0.7_235, 'label': 'cat', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}},
{'score': 0.7_218, 'label': 'remote', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}},
{'score': 0.7_184, 'label': 'couch', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}},
{'score': 0.6_748, 'label': 'remote', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}},
{'score': 0.6_656, 'label': 'cat', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}},
{'score': 0.6_614, 'label': 'couch', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}},
{'score': 0.6_456, 'label': 'remote', 'box': {'xmin': 4_9_4, 'ymin': 1_0_5, 'xmax': 5_2_1, 'ymax': 1_2_7}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 6_7, 'ymin': 2_7_4, 'xmax': 9_3, 'ymax': 2_9_7}},
{'score': 0.6_419, 'label': 'cat', 'box': {'xmin': 4_9_4, 'ymin': 1_0_5, 'xmax': 5_2_1, 'ymax': 1_2_7}},
]
] , )
    @require_torch
    @slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}},
{'score': 0.2_537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 5_5, 'xmax': 3_1_5, 'ymax': 4_7_2}},
{'score': 0.1_474, 'label': 'remote', 'box': {'xmin': 3_3_5, 'ymin': 7_4, 'xmax': 3_7_1, 'ymax': 1_8_7}},
{'score': 0.1_208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_4_2, 'ymax': 4_7_6}},
] , )
        outputs = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}},
{'score': 0.2_537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 5_5, 'xmax': 3_1_5, 'ymax': 4_7_2}},
{'score': 0.1_474, 'label': 'remote', 'box': {'xmin': 3_3_5, 'ymin': 7_4, 'xmax': 3_7_1, 'ymax': 1_8_7}},
{'score': 0.1_208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_4_2, 'ymax': 4_7_6}},
],
[
{'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}},
{'score': 0.2_537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 5_5, 'xmax': 3_1_5, 'ymax': 4_7_2}},
{'score': 0.1_474, 'label': 'remote', 'box': {'xmin': 3_3_5, 'ymin': 7_4, 'xmax': 3_7_1, 'ymax': 1_8_7}},
{'score': 0.1_208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_4_2, 'ymax': 4_7_6}},
],
] , )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_large_model_tf(self):
        pass
    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}},
{'score': 0.2_537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 5_5, 'xmax': 3_1_5, 'ymax': 4_7_2}},
] , )
    @require_torch
    @slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}},
] , )
| 101
| 0
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
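# For illustration: floats_list((2, 3)) returns a 2x3 nested list of floats in
# [0.0, scale), e.g. [[0.64, 0.02, 0.27], [0.22, 0.73, 0.67]] (values vary with
# the random state of global_rng).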
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
        return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]
        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
| 708
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver_works(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card_creation(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 402
| 0
|
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 9
|
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
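# Sketch of what encode_line produces (the tokenizer instance is an assumption):
#
#   batch = encode_line(tokenizer, "Hello world", max_length=8, padding_side="right")
#   batch["input_ids"].shape  # -> torch.Size([1, 8]); padded/truncated to max_length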
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
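# Example: with pad_token_id=0, a batch [[5, 6, 0], [7, 0, 0]] is trimmed to
# [[5, 6], [7, 0]] because only the last column is padding in every row.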
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
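# Example: normalize_answer("The Cat!") -> "cat" (lowercased, punctuation and
# the leading article removed, whitespace collapsed).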
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
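# Worked example: prediction "a cat sat" vs ground truth "the cat sat down"
# normalizes to ["cat", "sat"] and ["cat", "sat", "down"], so num_same = 2,
# precision = 2/2 = 1.0, recall = 2/3, and f1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.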
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")
def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
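# Usage sketch (the hparams namespace and the specific parameter names are
# assumptions for illustration):
#
#   hparams, config = set_extra_model_params(
#       ["encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout"],
#       hparams, config,
#   )
#
# "dropout" is rewritten to "dropout_rate" when the config (e.g. T5) uses that name.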
| 83
| 0
|
"""
Dutch National Flag sort: sorts a sequence containing only the three flag
"colors" (0, 1 and 2) in place, in a single pass.
"""
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """
    >>> dutch_national_flag_sort([2, 1, 0, 0, 1, 2])
    [0, 0, 1, 1, 2, 2]
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contains only {colors} values"
            raise ValueError(msg)
    return sequence
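# Single-pass trace for [2, 0, 1]: mid sees 2 -> swap with high -> [1, 0, 2];
# mid sees 1 -> advance; mid sees 0 -> swap with low -> [0, 1, 2]. Each element
# is inspected at most once, giving O(n) time and O(1) extra space.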
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
print(f'{dutch_national_flag_sort(unsorted)}')
| 715
|
"""
Find the product a * b * c of a Pythagorean triplet (a^2 + b^2 = c^2) with
a + b + c = n. Project Euler problem 9 uses n = 1000.
"""


def solution(n: int = 1000) -> int:
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
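# Sanity check: for n = 1000 the triplet is (200, 375, 425), since
# 200 + 375 + 425 = 1000 and 200**2 + 375**2 = 425**2, so solution() = 31875000.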
if __name__ == "__main__":
print(f'{solution() = }')
| 330
| 0
|
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 108
|
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group('nccl')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(F'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(F'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(F'''{gpu} is broken''')
raise
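# On a healthy 2-GPU node the script prints something like the following
# (hostnames and version numbers are illustrative, not guaranteed output):
#
#   [node1-0] is OK (global rank: 0/2)
#   [node1-1] is OK (global rank: 1/2)
#   pt=2.0.1, cuda=11.8, nccl=(2, 14, 3)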
| 556
| 0
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)
    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)
            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )
            comm_check_on_output(result)
            self.parent.assertTrue(result.loss is not None)
            self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)
    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_tiny(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config)
        model.to(torch_device)
        model.train()
        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config)
        model.to(torch_device)
        model.train()
        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True)
        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )
    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))
        with torch.no_grad():
            outputs = model(**inputs)
        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE)
        )
        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE)
        )
        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE)
        )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(_lowercase )
.eval()
)
SCREAMING_SNAKE_CASE__ : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Optional[int] = prepare_img()
SCREAMING_SNAKE_CASE__ : List[str] = image_processor(_lowercase , return_tensors='''pt''' ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowercase , (1, 3, 8_00, 10_88) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = model(**_lowercase )
# masks_queries_logits
SCREAMING_SNAKE_CASE__ : List[str] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
SCREAMING_SNAKE_CASE__ : List[Any] = [
[-1.3737124, -1.7724937, -1.9364233],
[-1.5977281, -1.9867939, -2.1523695],
[-1.5795398, -1.9269832, -2.093942],
]
SCREAMING_SNAKE_CASE__ : str = torch.tensor(_lowercase ).to(_lowercase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowercase , atol=_lowercase ) )
# class_queries_logits
SCREAMING_SNAKE_CASE__ : Any = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor(
[
[1.6_512E00, -5.2_572E00, -3.3_519E00],
[3.6_169E-02, -5.9_025E00, -2.9_313E00],
[1.0_766E-04, -7.7_630E00, -5.1_263E00],
] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowercase , atol=_lowercase ) )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : int = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(_lowercase )
.eval()
)
SCREAMING_SNAKE_CASE__ : Tuple = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE__ : List[str] = image_processor(_lowercase , return_tensors='''pt''' ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowercase , (1, 3, 8_00, 10_88) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**_lowercase )
# masks_queries_logits
SCREAMING_SNAKE_CASE__ : List[str] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
SCREAMING_SNAKE_CASE__ : Dict = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
SCREAMING_SNAKE_CASE__ : int = torch.tensor(_lowercase ).to(_lowercase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowercase , atol=_lowercase ) )
# class_queries_logits
SCREAMING_SNAKE_CASE__ : Optional[int] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowercase , atol=_lowercase ) )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Dict = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(_lowercase )
.eval()
)
SCREAMING_SNAKE_CASE__ : Optional[int] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : List[str] = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE__ : Optional[int] = inputs['''pixel_values'''].to(_lowercase )
SCREAMING_SNAKE_CASE__ : str = [el.to(_lowercase ) for el in inputs['''mask_labels''']]
SCREAMING_SNAKE_CASE__ : int = [el.to(_lowercase ) for el in inputs['''class_labels''']]
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = model(**_lowercase )
self.assertTrue(outputs.loss is not None )
| 721
|
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
a_ :Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase ( _UpperCAmelCase ):
def __init__( self : int , _lowercase : Tuple , _lowercase : Optional[Any]=7_68 ):
super().__init__(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = proj_size
SCREAMING_SNAKE_CASE__ : Dict = CLIPVisionModel(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = PaintByExampleMapper(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = nn.LayerNorm(config.hidden_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = nn.Linear(config.hidden_size , self.proj_size )
# unconditioned embedding used for guidance scaling
SCREAMING_SNAKE_CASE__ : str = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def lowercase__ ( self : str , _lowercase : Tuple , _lowercase : Optional[int]=False ):
SCREAMING_SNAKE_CASE__ : Tuple = self.model(pixel_values=_lowercase )
SCREAMING_SNAKE_CASE__ : int = clip_output.pooler_output
SCREAMING_SNAKE_CASE__ : List[Any] = self.mapper(latent_states[:, None] )
SCREAMING_SNAKE_CASE__ : Dict = self.final_layer_norm(_lowercase )
SCREAMING_SNAKE_CASE__ : str = self.proj_out(_lowercase )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
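# Hedged shape sketch (illustrative only) of the forward pass above: the pooled CLIP
# vector is turned into a one-token "sequence" before being run through the mapper blocks.
import torch
pooled = torch.randn(4, 768)  # (batch, hidden) pooler_output
one_token_seq = pooled[:, None]  # (batch, 1, hidden), i.e. latent_states[:, None]
assert one_token_seq.shape == (4, 1, 768)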
class lowercase ( nn.Module ):
def __init__( self : Any , _lowercase : Optional[int] ):
super().__init__()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (config.num_hidden_layers + 1) // 5
SCREAMING_SNAKE_CASE__ : Tuple = config.hidden_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = 1
SCREAMING_SNAKE_CASE__ : List[Any] = nn.ModuleList(
[
BasicTransformerBlock(_lowercase , _lowercase , _lowercase , activation_fn='''gelu''' , attention_bias=_lowercase )
for _ in range(_lowercase )
] )
def lowercase__ ( self : int , _lowercase : str ):
for block in self.blocks:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = block(_lowercase )
return hidden_states
| 250
| 0
|
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__lowerCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Dict:
warnings.warn(
"""The preprocess method is deprecated and will be removed in a future version. Please"""
""" use VaeImageProcessor.preprocess instead""", UpperCAmelCase__, )
if isinstance(UpperCAmelCase__, torch.Tensor ):
return image
elif isinstance(UpperCAmelCase__, PIL.Image.Image ):
A_ = [image]
if isinstance(image[0], PIL.Image.Image ):
A_ , A_ = image[0].size
A_ , A_ = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
A_ = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image]
A_ = np.concatenate(UpperCAmelCase__, axis=0 )
A_ = np.array(UpperCAmelCase__ ).astype(np.floataa ) / 255.0
A_ = image.transpose(0, 3, 1, 2 )
A_ = 2.0 * image - 1.0
A_ = torch.from_numpy(UpperCAmelCase__ )
elif isinstance(image[0], torch.Tensor ):
A_ = torch.cat(UpperCAmelCase__, dim=0 )
return image
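# Worked example (illustrative values) of the normalization performed above:
# uint8 pixels -> [0, 1] -> channels-first -> [-1, 1], the range the diffusion UNet expects.
import numpy as np
img = np.full((1, 64, 64, 3), 255, dtype=np.float32)
img = img / 255.0  # white pixel -> 1.0
img = img.transpose(0, 3, 1, 2)  # NHWC -> NCHW
img = 2.0 * img - 1.0  # white -> 1.0, black would map to -1.0
assert img.max() == 1.0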
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> List[str]:
if isinstance(UpperCAmelCase__, torch.Tensor ):
return mask
elif isinstance(UpperCAmelCase__, PIL.Image.Image ):
A_ = [mask]
if isinstance(mask[0], PIL.Image.Image ):
A_ , A_ = mask[0].size
A_ , A_ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
A_ = [np.array(m.convert("""L""" ).resize((w, h), resample=PIL_INTERPOLATION["""nearest"""] ) )[None, :] for m in mask]
A_ = np.concatenate(UpperCAmelCase__, axis=0 )
A_ = mask.astype(np.floataa ) / 255.0
A_ = 0
A_ = 1
A_ = torch.from_numpy(UpperCAmelCase__ )
elif isinstance(mask[0], torch.Tensor ):
A_ = torch.cat(UpperCAmelCase__, dim=0 )
return mask
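# Hedged sketch of the thresholding hidden behind the two bare assignments above
# (in the upstream source they read mask[mask < 0.5] = 0 and mask[mask >= 0.5] = 1):
import numpy as np
mask = np.array([0, 40, 200, 255], dtype=np.float32) / 255.0
mask[mask < 0.5] = 0
mask[mask >= 0.5] = 1
assert mask.tolist() == [0.0, 0.0, 1.0, 1.0]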
class A__ ( _snake_case ):
lowercase = 42
lowercase = 42
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=UpperCamelCase__ , scheduler=UpperCamelCase__ )
@torch.no_grad()
def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 250 , UpperCamelCase__ = 0.0 , UpperCamelCase__ = 10 , UpperCamelCase__ = 10 , UpperCamelCase__ = None , UpperCamelCase__ = "pil" , UpperCamelCase__ = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
A_ = image
A_ = _preprocess_image(UpperCamelCase__ )
A_ = original_image.to(device=self.device , dtype=self.unet.dtype )
A_ = _preprocess_mask(UpperCamelCase__ )
A_ = mask_image.to(device=self.device , dtype=self.unet.dtype )
A_ = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(UpperCamelCase__ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
A_ = original_image.shape
A_ = randn_tensor(UpperCamelCase__ , generator=UpperCamelCase__ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.device )
A_ = eta
A_ = self.scheduler.timesteps[0] + 1
A_ = generator[0] if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
A_ = self.unet(UpperCamelCase__ , UpperCamelCase__ ).sample
# compute previous image: x_t -> x_t-1
A_ = self.scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
A_ = self.scheduler.undo_step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A_ = t
A_ = (image / 2 + 0.5).clamp(0 , 1 )
A_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
A_ = self.numpy_to_pil(UpperCamelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase__ )
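# Toy illustration (not the real scheduler) of the control flow above: RePaint's timestep
# schedule jumps back and forth, so a decreasing t means a denoising step and an
# increasing t means an undo (re-noising) step.
schedule = [250, 249, 248, 258, 257, 256, 255]
t_last = schedule[0] + 1
steps = []
for t in schedule:
    steps.append("denoise" if t < t_last else "undo_step")
    t_last = t
assert steps[:4] == ["denoise", "denoise", "denoise", "undo_step"]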
| 288
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class A__ ( _snake_case , _snake_case ):
lowercase = "resnet"
lowercase = ["basic", "bottleneck"]
def __init__( self , UpperCamelCase__=3 , UpperCamelCase__=64 , UpperCamelCase__=[256, 512, 1024, 2048] , UpperCamelCase__=[3, 4, 6, 3] , UpperCamelCase__="bottleneck" , UpperCamelCase__="relu" , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ) -> List[str]:
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
A_ = num_channels
A_ = embedding_size
A_ = hidden_sizes
A_ = depths
A_ = layer_type
A_ = hidden_act
A_ = downsample_in_first_stage
A_ = ["""stem"""] + [f'''stage{idx}''' for idx in range(1 , len(UpperCamelCase__ ) + 1 )]
A_ , A_ = get_aligned_output_features_output_indices(
out_features=UpperCamelCase__ , out_indices=UpperCamelCase__ , stage_names=self.stage_names )
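# Hedged usage sketch (values illustrative; assumes the transformers library is installed):
from transformers import ResNetConfig
cfg = ResNetConfig(layer_type="bottleneck", depths=[3, 4, 6, 3], out_features=["stage4"])
assert cfg.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]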
class A__ ( _snake_case ):
lowercase = version.parse("1.11" )
@property
def snake_case_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def snake_case_ ( self ) -> float:
'''simple docstring'''
return 1e-3
| 288
| 1
|
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class __snake_case :
def __init__( self : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any]=13 , _UpperCAmelCase : Optional[int]=7 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : List[str]=32 , _UpperCAmelCase : Tuple=5 , _UpperCAmelCase : Tuple=4 , _UpperCAmelCase : str=37 , _UpperCAmelCase : int="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : Union[str, Any]=50 , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : int=True , _UpperCAmelCase : Optional[Any]=None , ) -> List[str]:
'''simple docstring'''
_lowerCAmelCase : str = parent
_lowerCAmelCase : List[Any] = batch_size
_lowerCAmelCase : List[Any] = seq_length
_lowerCAmelCase : List[Any] = is_training
_lowerCAmelCase : Tuple = use_input_mask
_lowerCAmelCase : Optional[int] = vocab_size
_lowerCAmelCase : List[str] = hidden_size
_lowerCAmelCase : Optional[int] = num_hidden_layers
_lowerCAmelCase : str = num_attention_heads
_lowerCAmelCase : str = intermediate_size
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : int = initializer_range
_lowerCAmelCase : Dict = use_labels
_lowerCAmelCase : int = scope
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Optional[int] = None
if self.use_input_mask:
_lowerCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Tuple = self.get_config()
return config, input_ids, input_mask, token_labels
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
'''simple docstring'''
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
(
_lowerCAmelCase
) : List[Any] = self.prepare_config_and_inputs()
_lowerCAmelCase : str = True
_lowerCAmelCase : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE ( self : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , **_UpperCAmelCase : Tuple , ) -> Any:
'''simple docstring'''
_lowerCAmelCase : List[Any] = BertGenerationEncoder(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase : List[str] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )
_lowerCAmelCase : str = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , **_UpperCAmelCase : List[str] , ) -> Union[str, Any]:
'''simple docstring'''
_lowerCAmelCase : List[str] = True
_lowerCAmelCase : Optional[int] = BertGenerationEncoder(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase : Dict = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
_lowerCAmelCase : Dict = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : int , **_UpperCAmelCase : str , ) -> List[Any]:
'''simple docstring'''
_lowerCAmelCase : Optional[int] = True
_lowerCAmelCase : Any = True
_lowerCAmelCase : Optional[int] = BertGenerationDecoder(config=_UpperCAmelCase ).to(_UpperCAmelCase ).eval()
# first forward pass
_lowerCAmelCase : Any = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase , )
_lowerCAmelCase : List[str] = outputs.past_key_values
# create hypothetical multiple next tokens and extend next_input_ids accordingly
_lowerCAmelCase : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowerCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append the new tokens to input_ids and the new mask to the attention mask
_lowerCAmelCase : Dict = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCAmelCase : str = torch.cat([input_mask, next_mask] , dim=-1 )
_lowerCAmelCase : Any = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , )["""hidden_states"""][0]
_lowerCAmelCase : List[str] = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , )["""hidden_states"""][0]
# select random slice
_lowerCAmelCase : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCAmelCase : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
_lowerCAmelCase : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any] , *_UpperCAmelCase : Optional[Any] , ) -> str:
'''simple docstring'''
_lowerCAmelCase : List[Any] = BertGenerationDecoder(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase : str = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
_lowerCAmelCase : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __snake_case (_a , _a , _a , unittest.TestCase ):
lowerCAmelCase__ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
lowerCAmelCase__ = (BertGenerationDecoder,) if is_torch_available() else ()
lowerCAmelCase__ = (
{"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
'''simple docstring'''
_lowerCAmelCase : Optional[int] = BertGenerationEncoderTester(self )
_lowerCAmelCase : Dict = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
_lowerCAmelCase : Dict = """bert"""
self.model_tester.create_and_check_model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
(
_lowerCAmelCase
) : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
_lowerCAmelCase : str = None
self.model_tester.create_and_check_model_as_decoder(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
'''simple docstring'''
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*_UpperCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
self.assertIsNotNone(_UpperCAmelCase )
@require_torch
class __snake_case (unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
_lowerCAmelCase : Union[str, Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
_lowerCAmelCase : Any = model(_UpperCAmelCase )[0]
_lowerCAmelCase : Dict = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape , _UpperCAmelCase )
_lowerCAmelCase : List[Any] = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
@require_torch
class __snake_case (unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = BertGenerationDecoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
_lowerCAmelCase : Tuple = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
_lowerCAmelCase : List[Any] = model(_UpperCAmelCase )[0]
_lowerCAmelCase : Optional[int] = torch.Size([1, 8, 5_0358] )
self.assertEqual(output.shape , _UpperCAmelCase )
_lowerCAmelCase : Dict = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
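# Sketch (illustrative tensors) of the slice-comparison pattern used in the tests above:
# torch.allclose passes when |a - b| <= atol + rtol * |b| element-wise.
import torch
a = torch.tensor([0.17750, 0.00830, -0.03210])
b = torch.tensor([0.17755, 0.00835, -0.03205])
assert torch.allclose(a, b, atol=1e-4)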
| 713
|
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
_lowerCamelCase : str = get_logger(__name__)
class __snake_case :
def __init__( self : Optional[int] , _UpperCAmelCase : Optional[str] = None ) -> List[str]:
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = (
os.path.join(_UpperCAmelCase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
_lowerCAmelCase : Optional[int] = Extractor
def SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCAmelCase : str ) -> str:
'''simple docstring'''
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract into the cache dir and derive the extracted path name by hashing the original path
_lowerCAmelCase : List[Any] = os.path.abspath(_UpperCAmelCase )
return os.path.join(self.extract_dir , hash_url_to_filename(_UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : bool ) -> bool:
'''simple docstring'''
return force_extract or (
not os.path.isfile(_UpperCAmelCase ) and not (os.path.isdir(_UpperCAmelCase ) and os.listdir(_UpperCAmelCase ))
)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : bool = False ) -> str:
'''simple docstring'''
_lowerCAmelCase : str = self.extractor.infer_extractor_format(_UpperCAmelCase )
if not extractor_format:
return input_path
_lowerCAmelCase : int = self._get_output_path(_UpperCAmelCase )
if self._do_extract(_UpperCAmelCase , _UpperCAmelCase ):
self.extractor.extract(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return output_path
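# Hedged sketch of the cache-path derivation above (the concrete hash is an assumption;
# the real helper is datasets' hash_url_to_filename):
import hashlib
import os
def cache_path(extract_dir: str, original_path: str) -> str:
    return os.path.join(extract_dir, hashlib.sha256(original_path.encode()).hexdigest())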
class __snake_case (_a ):
@classmethod
@abstractmethod
def SCREAMING_SNAKE_CASE ( cls : Tuple , _UpperCAmelCase : Union[Path, str] , **_UpperCAmelCase : Optional[int] ) -> bool:
'''simple docstring'''
...
@staticmethod
@abstractmethod
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase : Union[Path, str] , _UpperCAmelCase : Union[Path, str] ) -> None:
'''simple docstring'''
...
class __snake_case (_a , _a ):
lowerCAmelCase__ = []
@staticmethod
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase : Union[Path, str] , _UpperCAmelCase : int ) -> Dict:
'''simple docstring'''
with open(_UpperCAmelCase , """rb""" ) as f:
return f.read(_UpperCAmelCase )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Any , _UpperCAmelCase : Union[Path, str] , _UpperCAmelCase : bytes = b"" ) -> bool:
'''simple docstring'''
if not magic_number:
_lowerCAmelCase : Optional[int] = max(len(_UpperCAmelCase ) for cls_magic_number in cls.magic_numbers )
try:
_lowerCAmelCase : Union[str, Any] = cls.read_magic_number(_UpperCAmelCase , _UpperCAmelCase )
except OSError:
return False
return any(magic_number.startswith(_UpperCAmelCase ) for cls_magic_number in cls.magic_numbers )
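# Minimal magic-number sniffing sketch using the same idea (magic bytes copied from the
# extractor classes below):
from typing import Optional
MAGICS = {b"\x1f\x8b": "gzip", b"PK\x03\x04": "zip", b"\xfd\x37\x7a\x58\x5a\x00": "xz"}
def sniff(path: str) -> Optional[str]:
    with open(path, "rb") as f:
        head = f.read(8)
    return next((fmt for magic, fmt in MAGICS.items() if head.startswith(magic)), None)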
class __snake_case (_a ):
@classmethod
def SCREAMING_SNAKE_CASE ( cls : str , _UpperCAmelCase : Union[Path, str] , **_UpperCAmelCase : Dict ) -> bool:
'''simple docstring'''
return tarfile.is_tarfile(_UpperCAmelCase )
@staticmethod
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple ) -> Tuple:
'''simple docstring'''
def resolved(_UpperCAmelCase : str ) -> str:
return os.path.realpath(os.path.abspath(_UpperCAmelCase ) )
def badpath(_UpperCAmelCase : str , _UpperCAmelCase : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) ).startswith(_UpperCAmelCase )
def badlink(_UpperCAmelCase : Optional[int] , _UpperCAmelCase : str ) -> bool:
# Links are interpreted relative to the directory containing the link
_lowerCAmelCase : Tuple = resolved(os.path.join(_UpperCAmelCase , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=_UpperCAmelCase )
_lowerCAmelCase : List[Any] = resolved(_UpperCAmelCase )
for finfo in members:
if badpath(finfo.name , _UpperCAmelCase ):
logger.error(f"Extraction of {finfo.name} is blocked (illegal path)" )
elif finfo.issym() and badlink(_UpperCAmelCase , _UpperCAmelCase ):
logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}" )
elif finfo.islnk() and badlink(_UpperCAmelCase , _UpperCAmelCase ):
logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}" )
else:
yield finfo
@staticmethod
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase : Union[Path, str] , _UpperCAmelCase : Union[Path, str] ) -> None:
'''simple docstring'''
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
_lowerCAmelCase : str = tarfile.open(_UpperCAmelCase )
tar_file.extractall(_UpperCAmelCase , members=TarExtractor.safemembers(_UpperCAmelCase , _UpperCAmelCase ) )
tar_file.close()
class __snake_case (_a ):
lowerCAmelCase__ = [b"\x1F\x8B"]
@staticmethod
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase : Union[Path, str] , _UpperCAmelCase : Union[Path, str] ) -> None:
'''simple docstring'''
with gzip.open(_UpperCAmelCase , """rb""" ) as gzip_file:
with open(_UpperCAmelCase , """wb""" ) as extracted_file:
shutil.copyfileobj(_UpperCAmelCase , _UpperCAmelCase )
class __snake_case (_a ):
lowerCAmelCase__ = [
b"PK\x03\x04",
b"PK\x05\x06", # empty archive
b"PK\x07\x08", # spanned archive
]
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Dict , _UpperCAmelCase : Union[Path, str] , _UpperCAmelCase : bytes = b"" ) -> bool:
'''simple docstring'''
if super().is_extractable(_UpperCAmelCase , magic_number=_UpperCAmelCase ):
return True
try:
# Alternative version of zipfile.is_zipfile that has fewer false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(_UpperCAmelCase , """rb""" ) as fp:
_lowerCAmelCase : Union[str, Any] = _EndRecData(_UpperCAmelCase )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
_lowerCAmelCase : List[Any] = fp.read(_UpperCAmelCase ) # CD is where we expect it to be
if len(_UpperCAmelCase ) == sizeCentralDir:
_lowerCAmelCase : int = struct.unpack(_UpperCAmelCase , _UpperCAmelCase ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase : Union[Path, str] , _UpperCAmelCase : Union[Path, str] ) -> None:
'''simple docstring'''
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
with zipfile.ZipFile(_UpperCAmelCase , """r""" ) as zip_file:
zip_file.extractall(_UpperCAmelCase )
zip_file.close()
class __snake_case (_a ):
lowerCAmelCase__ = [b"\xFD\x37\x7A\x58\x5A\x00"]
@staticmethod
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase : Union[Path, str] , _UpperCAmelCase : Union[Path, str] ) -> None:
'''simple docstring'''
with lzma.open(_UpperCAmelCase ) as compressed_file:
with open(_UpperCAmelCase , """wb""" ) as extracted_file:
shutil.copyfileobj(_UpperCAmelCase , _UpperCAmelCase )
class __snake_case (_a ):
lowerCAmelCase__ = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"] # RAR_ID # RAR5_ID
@staticmethod
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase : Union[Path, str] , _UpperCAmelCase : Union[Path, str] ) -> None:
'''simple docstring'''
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
_lowerCAmelCase : str = rarfile.RarFile(_UpperCAmelCase )
rf.extractall(_UpperCAmelCase )
rf.close()
class __snake_case (_a ):
lowerCAmelCase__ = [b"\x28\xb5\x2F\xFD"]
@staticmethod
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase : Union[Path, str] , _UpperCAmelCase : Union[Path, str] ) -> None:
'''simple docstring'''
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
_lowerCAmelCase : Any = zstd.ZstdDecompressor()
with open(_UpperCAmelCase , """rb""" ) as ifh, open(_UpperCAmelCase , """wb""" ) as ofh:
dctx.copy_stream(_UpperCAmelCase , _UpperCAmelCase )
class __snake_case (_a ):
lowerCAmelCase__ = [b"\x42\x5A\x68"]
@staticmethod
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase : Union[Path, str] , _UpperCAmelCase : Union[Path, str] ) -> None:
'''simple docstring'''
with bza.open(_UpperCAmelCase , """rb""" ) as compressed_file:
with open(_UpperCAmelCase , """wb""" ) as extracted_file:
shutil.copyfileobj(_UpperCAmelCase , _UpperCAmelCase )
class __snake_case (_a ):
lowerCAmelCase__ = [b"\x37\x7A\xBC\xAF\x27\x1C"]
@staticmethod
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase : Union[Path, str] , _UpperCAmelCase : Union[Path, str] ) -> None:
'''simple docstring'''
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
import pyazr
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
with pyazr.SevenZipFile(_UpperCAmelCase , """r""" ) as archive:
archive.extractall(_UpperCAmelCase )
class __snake_case (_a ):
lowerCAmelCase__ = [b"\x04\x22\x4D\x18"]
@staticmethod
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase : Union[Path, str] , _UpperCAmelCase : Union[Path, str] ) -> None:
'''simple docstring'''
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
import lza.frame
with lza.frame.open(_UpperCAmelCase , """rb""" ) as compressed_file:
with open(_UpperCAmelCase , """wb""" ) as extracted_file:
shutil.copyfileobj(_UpperCAmelCase , _UpperCAmelCase )
class __snake_case :
# Put zip last: zip detection can produce false positives for files that are actually e.g. tar or gzip
lowerCAmelCase__ = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def SCREAMING_SNAKE_CASE ( cls : str ) -> Optional[int]:
'''simple docstring'''
return max(
len(_UpperCAmelCase )
for extractor in cls.extractors.values()
if issubclass(_UpperCAmelCase , _UpperCAmelCase )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase : Union[Path, str] , _UpperCAmelCase : int ) -> Any:
'''simple docstring'''
try:
return MagicNumberBaseExtractor.read_magic_number(_UpperCAmelCase , magic_number_length=_UpperCAmelCase )
except OSError:
return b""
@classmethod
def SCREAMING_SNAKE_CASE ( cls : int , _UpperCAmelCase : Union[Path, str] , _UpperCAmelCase : bool = False ) -> bool:
'''simple docstring'''
warnings.warn(
"""Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'infer_extractor_format' instead.""" , category=_UpperCAmelCase , )
_lowerCAmelCase : str = cls.infer_extractor_format(_UpperCAmelCase )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[Any] , _UpperCAmelCase : Union[Path, str] ) -> str: # <Added version="2.4.0"/>
'''simple docstring'''
_lowerCAmelCase : str = cls._get_magic_number_max_length()
_lowerCAmelCase : Dict = cls._read_magic_number(_UpperCAmelCase , _UpperCAmelCase )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(_UpperCAmelCase , magic_number=_UpperCAmelCase ):
return extractor_format
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[Any] , _UpperCAmelCase : Union[Path, str] , _UpperCAmelCase : Union[Path, str] , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Optional[BaseExtractor] = "deprecated" , ) -> None:
'''simple docstring'''
os.makedirs(os.path.dirname(_UpperCAmelCase ) , exist_ok=_UpperCAmelCase )
# Prevent parallel extractions
_lowerCAmelCase : Tuple = str(Path(_UpperCAmelCase ).with_suffix(""".lock""" ) )
with FileLock(_UpperCAmelCase ):
shutil.rmtree(_UpperCAmelCase , ignore_errors=_UpperCAmelCase )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(_UpperCAmelCase , _UpperCAmelCase ): # passed as positional arg
warnings.warn(
"""Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'extractor_format' instead.""" , category=_UpperCAmelCase , )
_lowerCAmelCase : List[str] = extractor if extractor != """deprecated""" else extractor_format
else:
_lowerCAmelCase : List[Any] = cls.extractors[extractor_format]
return extractor.extract(_UpperCAmelCase , _UpperCAmelCase )
else:
warnings.warn(
"""Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" , category=_UpperCAmelCase , )
for extractor in cls.extractors.values():
if extractor.is_extractable(_UpperCAmelCase ):
return extractor.extract(_UpperCAmelCase , _UpperCAmelCase )
| 196
| 0
|
from math import factorial, pi
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 30 ) -> float:
if not isinstance(_lowercase , (int, float) ):
raise ValueError("""maclaurin_sin() requires either an int or float for theta""" )
if not isinstance(_lowercase , _lowercase ) or accuracy <= 0:
raise ValueError("""maclaurin_sin() requires a positive int for accuracy""" )
lowercase : Dict = float(_lowercase )
lowercase : Union[str, Any] = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(_lowercase ) )
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 30 ) -> float:
if not isinstance(_lowercase , (int, float) ):
raise ValueError("""maclaurin_cos() requires either an int or float for theta""" )
if not isinstance(_lowercase , _lowercase ) or accuracy <= 0:
raise ValueError("""maclaurin_cos() requires a positive int for accuracy""" )
lowercase : Tuple = float(_lowercase )
lowercase : Optional[int] = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(_lowercase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
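# Sanity check (not in the original; uses the call-site names printed above, since the
# function defs in this dump are renamed): after the 2*pi reduction, 30 series terms
# match math.sin/math.cos to double precision.
from math import sin, cos, isclose
assert isclose(maclaurin_sin(10), sin(10), rel_tol=1e-9)
assert isclose(maclaurin_cos(10), cos(10), rel_tol=1e-9)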
| 336
|
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class __a ( __snake_case ):
def __init__( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = True , UpperCAmelCase = "arrow" , **UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
split=UpperCAmelCase , features=UpperCAmelCase , cache_dir=UpperCAmelCase , keep_in_memory=UpperCAmelCase , streaming=UpperCAmelCase , **UpperCAmelCase , )
lowerCAmelCase_ = load_from_cache_file
lowerCAmelCase_ = file_format
lowerCAmelCase_ = Spark(
df=UpperCAmelCase , features=UpperCAmelCase , cache_dir=UpperCAmelCase , working_dir=UpperCAmelCase , **UpperCAmelCase , )
def lowerCamelCase_ ( self ):
'''simple docstring'''
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
lowerCAmelCase_ = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=UpperCAmelCase , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 552
| 0
|
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( __a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase : Any = XLNetTokenizer
lowerCAmelCase : str = XLNetTokenizerFast
lowerCAmelCase : List[str] = True
lowerCAmelCase : Union[str, Any] = True
def lowerCAmelCase ( self : List[str] ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCamelCase: List[Any] = XLNetTokenizer(_lowercase , keep_accents=_lowercase )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : Dict ):
"""simple docstring"""
_UpperCamelCase: List[str] = '''<s>'''
_UpperCamelCase: Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase )
def lowerCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_UpperCamelCase: Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''<eod>''' )
self.assertEqual(len(_lowercase ) , 1_006 )
def lowerCAmelCase ( self : Dict ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def lowerCAmelCase ( self : Dict ):
"""simple docstring"""
_UpperCamelCase: List[str] = XLNetTokenizer(_lowercase , keep_accents=_lowercase )
_UpperCamelCase: Optional[Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , [285, 46, 10, 170, 382] )
_UpperCamelCase: Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_UpperCamelCase: Tuple = tokenizer.convert_tokens_to_ids(_lowercase )
self.assertListEqual(_lowercase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
_UpperCamelCase: str = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def lowerCAmelCase ( self : List[str] ):
"""simple docstring"""
_UpperCamelCase: List[Any] = XLNetTokenizer(_lowercase , do_lower_case=_lowercase )
_UpperCamelCase: Optional[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''''',
'''i''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''▁he''', '''ll''', '''o'''] )
def lowerCAmelCase ( self : int ):
"""simple docstring"""
_UpperCamelCase: Tuple = XLNetTokenizer(_lowercase , do_lower_case=_lowercase )
_UpperCamelCase: str = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
@slow
def lowerCAmelCase ( self : Any ):
"""simple docstring"""
_UpperCamelCase: Union[str, Any] = XLNetTokenizer.from_pretrained('''xlnet-base-cased''' )
_UpperCamelCase: Optional[int] = tokenizer.encode('''sequence builders''' , add_special_tokens=_lowercase )
_UpperCamelCase: Optional[Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_lowercase )
_UpperCamelCase: int = tokenizer.build_inputs_with_special_tokens(_lowercase )
_UpperCamelCase: str = tokenizer.build_inputs_with_special_tokens(_lowercase , _lowercase )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def lowerCAmelCase ( self : List[Any] ):
"""simple docstring"""
_UpperCamelCase: Union[str, Any] = {'''input_ids''': [[17, 21_442, 270, 17, 10, 14_645, 318, 34, 17, 4_546, 3_145, 787, 13, 7_752, 22_018, 23, 21, 17, 4_546, 3_145, 787, 13, 3_352, 14_431, 13, 5_500, 11, 1_176, 580, 13, 16_819, 4_797, 23, 17, 10, 17_135, 658, 19, 457, 7_932, 13, 184, 19, 3_154, 17_135, 6_468, 19, 1_404, 12_269, 19, 4_229, 5_356, 16_264, 46, 19, 17, 20_545, 10_395, 9, 9, 9, 11, 28, 6_421, 9_531, 20_729, 17, 10, 353, 17_022, 11, 21, 6_421, 9_531, 16_949, 17, 10, 11_509, 753, 11, 33, 95, 2_421, 7_385, 956, 14_431, 2_626, 25, 842, 7_385, 4_836, 21, 1_429, 2_272, 9_855, 3_120, 161, 24_738, 19, 13_203, 658, 218, 787, 21, 430, 18_482, 847, 2_637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22_178, 27, 1_064, 22, 956, 13, 11_101, 1_429, 5_854, 24_313, 18_953, 40, 422, 24_366, 68, 1_758, 37, 10_483, 14_257, 31, 207, 263, 21, 203, 3_773, 25, 71, 9_735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2_049, 3_442, 17, 13_894, 3_380, 23, 95, 18, 17_634, 2_288, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''xlnet-base-cased''' , revision='''c841166438c31ec7ca9a106dee7bb312b73ae511''' , )
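# Note on the assertions above: XLNet places its special tokens at the END of the
# sequence (sep_token_id=4, cls_token_id=3 in this fixture), so a single input encodes
# as text + [4, 3] and a pair as text_a + [4] + text_b + [4, 3], unlike BERT's leading [CLS].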
| 264
|
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_337 , num_examples=42 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_337 , num_examples=42 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def lowerCAmelCase_ ( lowercase: SplitDict ) -> List[str]:
'''simple docstring'''
_UpperCamelCase: List[str] = split_dict._to_yaml_list()
assert len(lowercase ) == len(lowercase )
_UpperCamelCase: Tuple = SplitDict._from_yaml_list(lowercase )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
_UpperCamelCase: Optional[Any] = None
# the split name used as the key in split_dict overrides the name stored in the split info object
_UpperCamelCase: Any = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'''split_info''' , [SplitInfo(), SplitInfo(dataset_name=lowercase ), SplitInfo(dataset_name='''my_dataset''' )] )
def lowerCAmelCase_ ( lowercase: str ) -> Tuple:
'''simple docstring'''
# For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
# field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
_UpperCamelCase: Optional[Any] = asdict(SplitDict({'''train''': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 264
| 1
|
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class lowerCamelCase :
'''simple docstring'''
_snake_case : int = 4_2
_snake_case : List[str] = None
_snake_case : List[Any] = None
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = Node(1 )
UpperCAmelCase_ : Optional[int] = Node(2 )
UpperCAmelCase_ : Dict = Node(3 )
UpperCAmelCase_ : Optional[int] = Node(4 )
UpperCAmelCase_ : Union[str, Any] = Node(5 )
return tree
def lowercase__ ( __snake_case : Node | None ):
'''simple docstring'''
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def lowercase__ ( __snake_case : Node | None ):
'''simple docstring'''
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def lowercase__ ( __snake_case : Node | None ):
'''simple docstring'''
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def lowercase__ ( __snake_case : Node | None ):
'''simple docstring'''
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def lowercase__ ( __snake_case : Node | None ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = []
if root is None:
return output
UpperCAmelCase_ : Union[str, Any] = deque([root] )
while process_queue:
UpperCAmelCase_ : Optional[int] = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def lowercase__ ( __snake_case : Node | None , __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : int = []
def populate_output(__snake_case : Node | None , __snake_case : int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(_UpperCAmelCase , _UpperCAmelCase )
return output
def lowercase__ ( __snake_case : Node | None , __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : int = []
def populate_output(__snake_case : Node | None , __snake_case : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(_UpperCAmelCase , _UpperCAmelCase )
return output
def lowercase__ ( __snake_case : Node | None ):
'''simple docstring'''
if root is None:
return []
UpperCAmelCase_ : List[str] = []
UpperCAmelCase_ : Optional[int] = 0
UpperCAmelCase_ : Optional[int] = height(_UpperCAmelCase )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(_UpperCAmelCase , _UpperCAmelCase ) )
UpperCAmelCase_ : int = 1
else:
output.append(get_nodes_from_right_to_left(_UpperCAmelCase , _UpperCAmelCase ) )
UpperCAmelCase_ : Tuple = 0
return output
def lowercase__ ( ): # Main function for testing.
'''simple docstring'''
UpperCAmelCase_ : Any = make_tree()
print(F"In-order Traversal: {inorder(_UpperCAmelCase )}" )
print(F"Pre-order Traversal: {preorder(_UpperCAmelCase )}" )
print(F"Post-order Traversal: {postorder(_UpperCAmelCase )}" , '\n' )
print(F"Height of Tree: {height(_UpperCAmelCase )}" , '\n' )
print('Complete Level Order Traversal: ' )
print(level_order(_UpperCAmelCase ) , '\n' )
print('Level-wise order Traversal: ' )
for level in range(1 , height(_UpperCAmelCase ) + 1 ):
print(F"Level {level}:" , get_nodes_from_left_to_right(_UpperCAmelCase , level=_UpperCAmelCase ) )
print('\nZigZag order Traversal: ' )
print(zigzag(_UpperCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
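# Quick check (not in the original; assumes the upstream fixture wiring, i.e.
# tree.left = Node(2), tree.right = Node(3), tree.left.left = Node(4),
# tree.left.right = Node(5), which the renamed make_tree above obscures):
t = make_tree()
assert inorder(t) == [4, 2, 5, 1, 3]
assert level_order(t) == [1, 2, 3, 4, 5]
assert zigzag(t) == [[1], [3, 2], [4, 5]]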
| 406
|
"""Open the top Google search result for a query in the default web browser."""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .utils import logging


logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1) -> LambdaLR:
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1) -> LambdaLR:
    """Constant learning rate preceded by a linear warmup over `num_warmup_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1) -> LambdaLR:
    """Piecewise constant learning rate, e.g. `step_rules="1:10,0.1:20,0.01:30,0.005"`
    keeps the multiplier at 1 up to step 10, at 0.1 up to step 20, at 0.01 up to
    step 30, and at 0.005 afterwards."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay to zero at `num_training_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay over `num_cycles` half-waves."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay with `num_cycles` hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup followed by a polynomial decay from the initial lr to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified entry point: build any of the schedules above from its name."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
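# Minimal usage sketch (illustrative; the toy model and step counts are
# placeholders, not part of the module above). Kept as a comment so this
# library module has no import-time side effects:
#
#   import torch
#
#   model = torch.nn.Linear(4, 4)
#   optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
#   lr_scheduler = get_scheduler("linear", optimizer, num_warmup_steps=10, num_training_steps=100)
#   for _ in range(100):
#       optimizer.step()      # update parameters first ...
#       lr_scheduler.step()   # ... then advance the LR schedule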
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of `n` (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    max_prime_factor = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            max_prime_factor = i
            n //= i
        i += 1
    if n > 1:
        max_prime_factor = n
    return int(max_prime_factor)


if __name__ == "__main__":
    print(f"{solution() = }")
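    # Sanity checks (illustrative additions): 13195 = 5 * 7 * 13 * 29, so its
    # largest prime factor is 29; a prime is its own largest prime factor.
    assert solution(13195) == 29
    assert solution(17) == 17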
import argparse
import gc
import json
import os
import shutil
import warnings

import torch

from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer


try:
    from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
    )
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}


def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
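# Worked example (added for illustration): for the 7B model `n` (the hidden
# dim) is 4096, so int(8 * 4096 / 3) = 10922, and rounding up to the next
# multiple of 256 gives ((10922 + 255) // 256) * 256 = 11008 -- the value
# recorded in INTERMEDIATE_SIZE_MAP["7B"].
assert compute_intermediate_size(4096) == 11008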
def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)


def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)

    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)

            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )

        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))

    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }

    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)


if __name__ == "__main__":
    main()
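# Example invocation (illustrative paths):
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir /path/to/hf/model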
from pathlib import PurePosixPath
from typing import Optional

import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo

from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url


class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(
        self,
        path: str,
        mode: str = "rb",
        **kwargs,
    ):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
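# Usage sketch (illustrative; the repo name is a placeholder): list the files
# of a dataset repo through this legacy filesystem. The `DatasetInfo` would
# typically come from the Hub API:
#
#   from huggingface_hub import HfApi
#
#   fs = HfFileSystem(repo_info=HfApi().dataset_info("squad"), token=None)
#   print(fs.ls(""))  # top-level files and directories of the repo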
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()

        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
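# Usage sketch (illustrative): pairing a ViT-style encoder with a GPT-2-style
# decoder; `from_encoder_decoder_configs` flips the decoder flags for you.
#
#   from transformers import AutoConfig, VisionEncoderDecoderConfig
#
#   encoder_cfg = AutoConfig.for_model("vit")
#   decoder_cfg = AutoConfig.for_model("gpt2")
#   config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_cfg, decoder_cfg)
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention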
from __future__ import annotations

import math

__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"


def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """Convert a 3D point (x, y, z) to a 2D drawable point by perspective
    projection with the given scale and camera distance."""
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotate the point (x, y, z) around the given axis ('x', 'y' or 'z')."""
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
    print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
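# Worked example (illustrative) for the projection above, with
# x=1, y=2, z=3, scale=10, distance=10:
#   projected_x = (1 * 10) / (3 + 10) * 10 = 100 / 13 ~= 7.6923
#   projected_y = (2 * 10) / (3 + 10) * 10 = 200 / 13 ~= 15.3846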
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""Convert a Music Spectrogram Diffusion (T5X) checkpoint to a diffusers pipeline."""
import argparse
import os

import jax as jnp  # only `jax.tree_util` is used below
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder


MODEL = "base_with_context"
def load_notes_encoder(weights, model):
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model


def load_continuous_encoder(weights, model):
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model


def load_decoder(weights, model):
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))

    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )

    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )

    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
        )

        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
        )

        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
        )

        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
        )
        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))

    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))

    return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"],
        vocab_size=synth_model.model.module.config.vocab_size,
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_context_length=synth_model.sequence_length["targets_context"],
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_length=synth_model.sequence_length["targets_context"],
        max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time,
        d_model=synth_model.model.module.config.emb_dim,
        num_layers=synth_model.model.module.config.num_decoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder,
        continuous_encoder=continuous_encoder,
        decoder=decoder,
        scheduler=scheduler,
        melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument(
        "--checkpoint_path",
        default=f"{MODEL}/checkpoint_500000",
        type=str,
        required=False,
        help="Path to the original jax model checkpoint.",
    )
    args = parser.parse_args()

    main(args)
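# Example invocation (illustrative output path):
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path base_with_context/checkpoint_500000 --output_path ./spectrogram-diffusion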
"""Integration tests for the `datasets` inspection utilities."""
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
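# Usage sketch (illustrative): the only field without a usable default is the
# prediction length; `context_length` falls back to it when left unset.
#
#   config = InformerConfig(prediction_length=24)
#   assert config.context_length == 24
#   assert config.d_model == 64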
from __future__ import annotations

import inspect
import unittest
from typing import List, Tuple

from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as RegNet does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
from __future__ import annotations


def is_9_pandigital(n: int) -> bool:
    """Check whether `n` uses each of the digits 1 through 9 exactly once."""
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    """Project Euler problem 38: largest 1-9 pandigital concatenated product.

    Multiplying a 4-digit base by 100002 concatenates the base with its double
    (base * 100002 = base * 100000 + base * 2), and multiplying a 3-digit base
    by 1002003 concatenates base, 2*base and 3*base. Searching downwards
    returns the largest pandigital candidate first.
    """
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
UpperCAmelCase_ : Optional[int] = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
UpperCAmelCase_ : Optional[Any] = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def _A (__a , __a=False ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = create_model(
'''HTSAT-tiny''' , '''roberta''' , __a , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=__a , fusion_type='''aff_2d''' if enable_fusion else None , )
return model, model_cfg
def rename_state_dict(state_dict):
    """Translate original CLAP parameter names to the Hugging Face layout."""
    model_state_dict = {}

    sequential_layers_pattern = r'.*sequential.(\d+).*'
    text_projection_pattern = r'.*_projection.(\d+).*'

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f'sequential.{sequential_layer}.', f'layers.{int(sequential_layer)//3}.linear.')
        elif re.match(text_projection_pattern, key):
            projection_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`, projection layer 0 maps
            # to linear1 and any later projection layer maps to linear2.
            transformers_projection_layer = 1 if projection_layer == 0 else 2
            key = key.replace(f'_projection.{projection_layer}.', f'_projection.linear{transformers_projection_layer}.')

        if 'audio' in key and 'qkv' in key:
            # split the fused qkv projection of the audio encoder into separate
            # query, key and value tensors (stacked along dim 0 in the original)
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace('qkv', 'query')] = query_layer
            model_state_dict[key.replace('qkv', 'key')] = key_layer
            model_state_dict[key.replace('qkv', 'value')] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
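

# A sketch of the sequential renaming above, assuming the original audio MLP
# packs three modules (linear, activation, dropout) per block inside a single
# nn.Sequential: a hypothetical checkpoint key such as
#   'audio_model.audio_encoder.mlp.sequential.9.weight'
# would become
#   'audio_model.audio_encoder.mlp.layers.3.linear.weight'   # 9 // 3 == 3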
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    """Load an original CLAP checkpoint and save it in Hugging Face format."""
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
UpperCAmelCase_ : str = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 176
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default='cifar10', metadata={'help': 'Name of a dataset from the datasets package'}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={'help': "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the training data.'})
    validation_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the validation data.'})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={'help': 'Percent to split off of train for validation.'}
    )
    mask_patch_size: int = field(default=32, metadata={'help': 'The size of the square patches to use for masking.'})
    mask_ratio: float = field(default=0.6, metadata={'help': 'Percentage of patches to mask.'})
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files['train'] = self.train_dir
        if self.validation_dir is not None:
            data_files['validation'] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            'help': (
                'The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '
                'checkpoint identifier on the hub. '
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={'help': 'Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'},
    )
    model_revision: str = field(
        default='main',
        metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'},
    )
    image_processor_name: str = field(default=None, metadata={'help': 'Name or path of preprocessor config.'})
    use_auth_token: bool = field(
        default=False,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'
            )
        },
    )
    encoder_stride: Optional[int] = field(default=None, metadata={'help': 'Stride to use for the encoder.'})
class MaskGenerator:
    """
    A class to generate boolean masks for the pretraining task. A mask is a 1D tensor of shape
    (model_patch_size**2,) where the value is either 0 or 1, and 1 indicates "masked".
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError('Input size must be divisible by mask patch size')
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError('Mask patch size must be divisible by model patch size')

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
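

# A minimal usage sketch with the default values above: input_size=192 and
# mask_patch_size=32 give a 6x6 coarse grid (36 tokens), of which
# ceil(36 * 0.6) = 22 positions are masked; the grid is then upsampled by
# scale = 32 // 4 = 8 to a 48x48 map, one entry per model patch, and returned
# flattened as a tensor of 2304 zeros and ones.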
def collate_fn(examples):
    pixel_values = torch.stack([example['pixel_values'] for example in examples])
    mask = torch.stack([example['mask'] for example in examples])
    return {'pixel_values': pixel_values, 'bool_masked_pos': mask}
def main():
    # See all possible arguments by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_mim', model_args, data_args)
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
        + f'distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}'
    )
    logger.info(f'Training/evaluation parameters {training_args}')
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f'Output directory ({training_args.output_dir}) already exists and is not empty. '
                'Use --overwrite_output_dir to overcome.'
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.'
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if 'validation' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds['train'].train_test_split(data_args.train_val_split)
        ds['train'] = split['train']
        ds['validation'] = split['test']
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.')
        if model_args.config_overrides is not None:
            logger.info(f'Overriding config: {model_args.config_overrides}')
            config.update_from_string(model_args.config_overrides)
            logger.info(f'New config: {config}')
    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, 'decoder_type'):
        config.decoder_type = 'simmim'

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )
    config.update(
        {
            'image_size': model_args.image_size,
            'patch_size': model_args.patch_size,
            'encoder_stride': model_args.encoder_stride,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool('.ckpt' in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info('Training new model from scratch')
        model = AutoModelForMaskedImageModeling.from_config(config)

    if training_args.do_train:
        column_names = ds['train'].column_names
    else:
        column_names = ds['validation'].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif 'image' in column_names:
        image_column_name = 'image'
    elif 'img' in column_names:
        image_column_name = 'img'
    else:
        image_column_name = column_names[0]
    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        """Apply the image transforms and draw a fresh random mask for each example."""
        examples['pixel_values'] = [transforms(image) for image in examples[image_column_name]]
        examples['mask'] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples
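
    # Note: set_transform (used below) applies preprocess_images lazily, per
    # batch, at __getitem__ time, so a fresh random crop/flip and a fresh random
    # mask are drawn every epoch instead of being baked into the dataset once.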
    if training_args.do_train:
        if 'train' not in ds:
            raise ValueError('--do_train requires a train dataset')
        if data_args.max_train_samples is not None:
            ds['train'] = ds['train'].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds['train'].set_transform(preprocess_images)

    if training_args.do_eval:
        if 'validation' not in ds:
            raise ValueError('--do_eval requires a validation dataset')
        if data_args.max_eval_samples is not None:
            ds['validation'] = (
                ds['validation'].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds['validation'].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds['train'] if training_args.do_train else None,
        eval_dataset=ds['validation'] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics('train', train_result.metrics)
        trainer.save_metrics('train', train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        'finetuned_from': model_args.model_name_or_path,
        'tasks': 'masked-image-modeling',
        'dataset': data_args.dataset_name,
        'tags': ['masked-image-modeling'],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| 176
| 1
|
"""simple docstring"""
import math
def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)
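

# Derivation of the check above: the candidates generated below have the form
# k = (i**2 - 1) / 4, so sqrt(4k + 1) = i and the expression reduces to
# log2((i + 1) / 2).  The partition is "perfect" exactly when (i + 1) / 2 is a
# power of two, i.e. when that logarithm is an integer.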
def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(F"{solution() = }")
| 227
|
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(','):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)

        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.'
                )

        return [self.labels[l] for l in label]
    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = 'pil',
        return_dict: bool = True,
    ):
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
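
        # Classifier-free guidance bookkeeping: when guidance_scale > 1 the batch
        # is doubled - the first half is conditioned on the requested class ids,
        # the second half on the "null" class (id 1000 in DiT's ImageNet setup) -
        # so one forward pass yields both the conditional and unconditional noise
        # predictions that are recombined in the guidance step below.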
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == 'mps'
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])

            # predict noise model_output
            noise_pred = self.transformer(latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == 'pil':
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
| 667
| 0
|
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'width_multiplier'))
class MobileViTVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act='swish',
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout,
            attn_dropout=self.attn_dropout,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': MobileViTVaModel,
            'image-classification': MobileViTVaForImageClassification,
            'image-segmentation': MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='MobileViTV2 does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='MobileViTV2 does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='MobileViTV2 does not output attentions')
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.')
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict['output_hidden_states']
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256')
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256').to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 711
|
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Create all ngrams of the given size from a sentence.

    >>> create_ngram('I am a sentence', 2)
    ['I ', ' a', 'am', 'm ', ' a', 'a ', ' s', 'se', 'en', 'nt', 'te', 'en', 'nc', 'ce']
    """
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 673
| 0
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot-3B': 128}


class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors='replace',
        bos_token='<s>',
        eos_token='</s>',
        sep_token='</s>',
        cls_token='<s>',
        unk_token='<unk>',
        pad_token='<pad>',
        mask_token='<mask>',
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
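
        # The fast tokenizer's pre-tokenizer and post-processor are serialized
        # components loaded from tokenizer.json; the block below rebuilds them
        # whenever the requested `add_prefix_space` / `trim_offsets` values
        # differ from the stored ones, so the Python-level arguments win.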
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if 'sep' in state:
                state['sep'] = tuple(state['sep'])
            if 'cls' in state:
                state['cls'] = tuple(state['cls'])

            changes_to_apply = False

            if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True

            if state.get('trim_offsets', trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it,
        # so it is treated like any other word.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            'to use it with pretokenized inputs.'
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            'to use it with pretokenized inputs.'
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(' ' + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = ' '.join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.')
        return input_ids
| 103
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
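
# The values above are FLORES-200 style codes of the form `language_Script`
# (e.g. `eng_Latn`), which is the naming scheme the NLLB-200 checkpoints expect
# for their source/target language tokens.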
class TranslationTool(PipelineTool):
    default_checkpoint = 'facebook/nllb-200-distilled-600M'
    description = (
        'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
        'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
        'which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in '
        'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
    )
    name = 'translator'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ['text', 'text', 'text']
    outputs = ['text']
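
    # A minimal usage sketch (assuming the standard `Tool.__call__` flow, which
    # chains encode -> forward -> decode):
    #
    #   translator = TranslationTool()
    #   translator('I like pizza', src_lang='English', tgt_lang='French')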
    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f'{src_lang} is not a supported language.')
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f'{tgt_lang} is not a supported language.')
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors='pt', src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
| 658
| 0
|
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
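

# `offline` simulates three distinct failure modes: CONNECTION_TIMES_OUT
# (requests without a timeout raise RequestWouldHangIndefinitelyError, requests
# with one raise ConnectTimeout), CONNECTION_FAILS (requests raise
# ConnectionError immediately) and HF_DATASETS_OFFLINE_SET_TO_1 (the
# library-level offline flag is set, so helpers like http_head refuse to
# connect).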
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request('GET', 'https://huggingface.co')
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request('GET', 'https://huggingface.co', timeout=1.0)
@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request('GET', 'https://huggingface.co')
def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head('https://huggingface.co')
| 721
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default='cifar10', metadata={'help': 'Name of a dataset from the datasets package'}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={'help': 'The column name of the images in the files.'}
    )
    train_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the training data.'})
    validation_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the validation data.'})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={'help': 'Percent to split off of train for validation.'}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files['train'] = self.train_dir
        if self.validation_dir is not None:
            data_files['validation'] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            'help': (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'}
    )
    model_revision: str = field(
        default='main',
        metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'},
    )
    image_processor_name: str = field(default=None, metadata={'help': 'Name or path of preprocessor config.'})
    use_auth_token: bool = field(
        default=False,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={'help': 'Whether or not to train with normalized pixel values as target.'}
    )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )
def collate_fn(examples):
    """Stack the pixel values of a batch of examples into a single tensor."""
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying the transforms."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
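# Example invocation (illustrative; the output path and hyper-parameters below
# are assumptions, not values prescribed by this script):
#
#   python run_mae.py \
#       --dataset_name cifar10 \
#       --output_dir ./vit-mae-demo \
#       --do_train \
#       --do_eval \
#       --base_learning_rate 1.5e-4 \
#       --overwrite_output_dir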
| 616
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
| 691
|
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"
_SCREAMING_SNAKE_CASE =f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 691
| 1
|
def solution(n: int = 1000) -> int:
    """Project Euler 57: count the first `n` expansions of sqrt(2)'s continued
    fraction whose numerator has more digits than its denominator."""
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 608
|
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Flip two qubits with X gates, measure them, and return the counts."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(f"""Total count for various states are: {counts}""")
| 608
| 1
|
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        # tokenizer files for this checkpoint live under the older "facebook/blenderbot-90M" repo
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 504
|
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
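# Quick usage sketch of the model under test (an illustration assuming network
# access for the weights; `image` is an arbitrary PIL.Image):
#
#   from transformers import ViTImageProcessor
#   processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   model = FlaxViTModel.from_pretrained("google/vit-base-patch16-224")
#   outputs = model(**processor(images=image, return_tensors="np"))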
| 196
| 0
|
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight do not reach max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        # mark the element as used so .index() does not find it again
        profit_by_weight[index] = -1

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )

    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function Call
    calc_profit(profit, weight, max_weight)
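# Worked example: calc_profit([1, 2, 3], [3, 4, 5], 15) returns 6 -- all three
# items fit (total weight 12 <= 15), so the whole profit 1 + 2 + 3 is collected.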
| 720
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size

                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class OPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids

            generated_ids = model.generate(input_ids, max_length=10)

            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"

        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(batch_out_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids

            generated_ids = model.generate(input_ids, max_length=10)

            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 504
| 0
|
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Returns the mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Returns the variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Returns the standard deviation of the distribution."""
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        # a smooth, always-positive alternative to softplus
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
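# Minimal usage sketch (the shapes below are illustrative assumptions):
#
#   output = StudentTOutput(dim=1)
#   proj = output.get_parameter_projection(in_features=32)
#   distr_args = proj(torch.randn(8, 32))      # -> (df, loc, scale), each of shape (8,)
#   distr = output.distribution(distr_args)    # a StudentT distribution
#   nll = -distr.log_prob(torch.randn(8))      # per-sample negative log-likelihood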
| 113
|
import unittest
from transformers import DonutProcessor
_lowerCAmelCase : str ="""naver-clova-ix/donut-base"""
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Any = DonutProcessor.from_pretrained(lowerCamelCase__ )
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Any = {
"name": "John Doe",
"age": "99",
"city": "Atlanta",
"state": "GA",
"zip": "30301",
"phone": "123-4567",
"nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
}
UpperCAmelCase__: Optional[Any] = (
"<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
"<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
"<s_nicknames><s_nickname>Johnny</s_nickname>"
"<sep/><s_nickname>JD</s_nickname></s_nicknames>"
)
        actual_json = self.processor.token2json(sequence )
        self.assertDictEqual(actual_json , expected_json )
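# Editor's sketch (hypothetical values, not part of the original test):
# `token2json` inverts Donut's XML-like generation format, e.g.
#   processor.token2json("<s_name>John</s_name>")  # -> {"name": "John"}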
| 113
| 1
|
'''simple docstring'''
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative ( img ):
    # getting number of pixels in the image
    pixel_h , pixel_v = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h ):
        for j in range(pixel_v ):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
if __name__ == "__main__":
# read original image
_lowercase = imread("""image_data/lena.jpg""", 1)
# convert to its negative
_lowercase = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
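# Editor's sketch (not in the original script): the per-pixel loop above can be
# replaced by one vectorized NumPy expression, since `imread` returns a uint8
# ndarray with values in [0, 255]. The function name here is illustrative.
def convert_to_negative_vectorized(img):
    # subtracting every channel value from 255 flips the whole image in one pass
    return 255 - img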
| 427
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
_lowercase = logging.getLogger(__name__)
@dataclass
class ModelArguments :
    model_name_or_path : str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name : Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name : Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    cache_dir : Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    use_fast_tokenizer : bool = field(
        default=True , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
    model_revision : str = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    use_auth_token : bool = field(
        default=False , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
@dataclass
class DataTrainingArguments :
    train_file : Optional[str] = field(default=None , metadata={'''help''': '''The input training data file (a text file).'''} )
    validation_file : Optional[str] = field(
        default=None , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
    overwrite_cache : bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
    preprocessing_num_workers : Optional[int] = field(
        default=None , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
    max_seq_length : Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. If passed, sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    pad_to_max_length : bool = field(
        default=False , metadata={
            '''help''': (
                '''Whether to pad all samples to the maximum sentence length. '''
                '''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
                '''efficient on GPU but very bad for TPU.'''
            )
        } , )
    max_train_samples : Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    max_eval_samples : Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )
    def __post_init__( self ):
        if self.train_file is not None:
            extension = self.train_file.split('.' )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split('.' )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice :
    tokenizer : PreTrainedTokenizerBase
    padding : Union[bool, str, PaddingStrategy] = True
    max_length : Optional[int] = None
    pad_to_multiple_of : Optional[int] = None
    def __call__( self , features ):
        label_name = 'label' if 'label' in features[0].keys() else 'labels'
        labels = [feature.pop(label_name ) for feature in features]
        batch_size = len(features )
        num_choices = len(features[0]['input_ids'] )
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices )] for feature in features
        ]
        flattened_features = list(chain(*flattened_features ) )
        batch = self.tokenizer.pad(
            flattened_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
        # Un-flatten
        batch = {k: v.view(batch_size , num_choices , -1 ) for k, v in batch.items()}
        # Add back labels
        batch['labels'] = torch.tensor(labels , dtype=torch.int64 )
return batch
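# Editor's note (sketch): for a batch of B examples with C answer choices, the
# collator above pads B*C flattened sequences together and views the result back
# as (B, C, seq_len) -- e.g. input_ids of shape (8, 4, 128) for B=8, C=4.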
def lowerCamelCase__ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag' , a , a )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files['train'] = data_args.train_file
        if data_args.validation_file is not None:
            data_files['validation'] = data_args.validation_file
        extension = data_args.train_file.split('.' )[-1]
        raw_datasets = load_dataset(
            extension , data_files=data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f'ending{i}' for i in range(4 )]
    context_name = 'sent1'
    question_header_name = 'sent2'
    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
                ' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
                ' override this default with `--block_size xxx`.' )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
                f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
        max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
    def preprocess_function(examples ):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(question_headers )
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences ) )
        second_sentences = list(chain(*second_sentences ) )
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences , second_sentences , truncation=True , max_length=max_seq_length , padding='max_length' if data_args.pad_to_max_length else False , )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 , len(v ) , 4 )] for k, v in tokenized_examples.items()}
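    # Editor's note (sketch): each SWAG example is expanded into 4 (context, ending)
    # pairs before tokenization and regrouped in chunks of 4 afterwards, producing
    # exactly the per-example list-of-choices layout the collator above expects.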
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('--do_train requires a train dataset' )
        train_dataset = raw_datasets['train']
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
            train_dataset = train_dataset.select(range(max_train_samples ) )
        with training_args.main_process_first(desc='train dataset map pre-processing' ):
            train_dataset = train_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError('--do_eval requires a validation dataset' )
        eval_dataset = raw_datasets['validation']
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset ) , data_args.max_eval_samples )
            eval_dataset = eval_dataset.select(range(max_eval_samples ) )
        with training_args.main_process_first(desc='validation dataset map pre-processing' ):
            eval_dataset = eval_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer , pad_to_multiple_of=8 if training_args.fp16 else None )
    )
# Metric
    def compute_metrics(eval_predictions ):
        predictions , label_ids = eval_predictions
        preds = np.argmax(predictions , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics['train_samples'] = min(max_train_samples , len(train_dataset ) )
        trainer.log_metrics('train' , metrics )
        trainer.save_metrics('train' , metrics )
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics['eval_samples'] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )
    kwargs = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn( index ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
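# Example invocation (editor's sketch; model name and paths are placeholders):
#   python run_swag.py \
#     --model_name_or_path bert-base-uncased \
#     --output_dir /tmp/swag_out \
#     --do_train --do_eval \
#     --per_device_train_batch_size 16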
| 427
| 1
|
'''simple docstring'''
from __future__ import annotations
def two_pointer( nums: list[int] , target: int )-> list[int]:
    i = 0
    j = len(nums ) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{two_pointer([2, 7, 1_1, 1_5], 9) = }')
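# Editor's note (sketch): the two-pointer scan assumes `nums` is sorted in
# ascending order; on unsorted input it can miss a valid pair.
#   two_pointer([2, 7, 11, 15], 9)    # -> [0, 1]
#   two_pointer([2, 7, 11, 15], 100)  # -> []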
| 301
|
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
lowerCAmelCase = pytest.mark.integration
@require_faiss
class DatasetIndexingTest( TestCase ):
    """simple docstring"""
    def _create_dummy_dataset( self ):
        """simple docstring"""
        dset = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x ) for x in np.arange(30 ).tolist()]} )
        return dset
    def test_add_faiss_index( self ):
        """simple docstring"""
        import faiss
        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex , i : {"vecs": i * np.ones(5 , dtype=np.float32 )} , with_indices=True , keep_in_memory=True )
        dset = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
        scores , examples = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
        dset.drop_index('vecs' )
    def test_add_faiss_index_from_external_arrays( self ):
        """simple docstring"""
        import faiss
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
        scores , examples = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
    def test_serialization( self ):
        """simple docstring"""
        import faiss
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
            dset.save_faiss_index('vecs' , tmp_file.name )
            dset.load_faiss_index('vecs2' , tmp_file.name )
        os.unlink(tmp_file.name )
        scores , examples = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
    def test_drop_index( self ):
        """simple docstring"""
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
        dset.drop_index('vecs' )
        self.assertRaises(MissingIndex , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.float32 ) ) )
    def test_add_elasticsearch_index( self ):
        """simple docstring"""
        from elasticsearch import Elasticsearch
        dset: Dataset = self._create_dummy_dataset()
        with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
            mocked_index_create.return_value = {'acknowledged': True}
            mocked_bulk.return_value([(True, None)] * 30 )
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index('filename' , es_client=es_client )
            scores , examples = dset.get_nearest_examples('filename' , 'my_name-train_29' )
            self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class FaissIndexTest( TestCase ):
"""simple docstring"""
    def test_flat_ip( self ):
        """simple docstring"""
        import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        # add vectors
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsNotNone(index.faiss_index )
        self.assertEqual(index.faiss_index.ntotal , 5 )
        index.add_vectors(np.zeros((5, 5) , dtype=np.float32 ) )
        self.assertEqual(index.faiss_index.ntotal , 10 )
        # single query
        query = np.zeros(5 , dtype=np.float32 )
        query[1] = 1
        scores , indices = index.search(query )
        self.assertRaises(ValueError , index.search , query.reshape(-1 , 1 ) )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )
        # batched queries
        queries = np.eye(5 , dtype=np.float32 )[::-1]
        total_scores , total_indices = index.search_batch(queries )
        self.assertRaises(ValueError , index.search_batch , queries[0] )
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores ) , 0 )
        self.assertListEqual([4, 3, 2, 1, 0] , best_indices )
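        # Editor's note (sketch): the expected [4, 3, 2, 1, 0] follows from the
        # queries being the reversed identity matrix -- query row k is one-hot at
        # position 4-k, so under the inner-product metric its best match is the
        # basis vector stored at index 4-k.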
    def test_factory( self ):
        """simple docstring"""
        import faiss
        index = FaissIndex(string_factory='Flat' )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
        index = FaissIndex(string_factory='LSH' )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
        with self.assertRaises(ValueError ):
            _ = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
    def test_custom( self ):
        """simple docstring"""
        import faiss
        custom_index = faiss.IndexFlat(5 )
        index = FaissIndex(custom_index=custom_index )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
    def test_serialization( self ):
        """simple docstring"""
        import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
            index.save(tmp_file.name )
            index = FaissIndex.load(tmp_file.name )
        os.unlink(tmp_file.name )
        query = np.zeros(5 , dtype=np.float32 )
        query[1] = 1
        scores , indices = index.search(query )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs( mockfs ):
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 , dtype=np.float32 ) )
    index_name = 'index.faiss'
    path = f"""mock://{index_name}"""
    index.save(path , storage_options=mockfs.storage_options )
    index = FaissIndex.load(path , storage_options=mockfs.storage_options )
    query = np.zeros(5 , dtype=np.float32 )
    query[1] = 1
    scores , indices = index.search(query )
assert scores[0] > 0
assert indices[0] == 1
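# Editor's note (an assumption about the surrounding test suite): `mockfs` is a
# pytest fixture, expected to be defined in the suite's conftest, that registers
# a mock fsspec filesystem under the "mock://" protocol -- which is why
# FaissIndex.save/load accept its storage_options here.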
@require_elasticsearch
class ElasticSearchIndexTest( TestCase ):
"""simple docstring"""
    def test_elasticsearch( self ):
        """simple docstring"""
        from elasticsearch import Elasticsearch
        with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {'acknowledged': True}
            index = ElasticSearchIndex(es_client=es_client )
            mocked_bulk.return_value([(True, None)] * 3 )
            index.add_documents(['foo', 'bar', 'foobar'] )
            # single query
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores , indices = index.search(query )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # single query with timeout
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores , indices = index.search(query , request_timeout=30 )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # batched queries
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores , total_indices = index.search_batch(queries )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
            # batched queries with timeout
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores , total_indices = index.search_batch(queries , request_timeout=30 )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
| 174
| 0
|
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=2 , seq_length=56 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=2 , intermediate_size=7 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , attention_type="block_sparse" , use_bias=True , rescale_embeddings=False , block_size=2 , num_random_blocks=3 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = BigBirdConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
    test_attn_probs = False
    test_mismatched_shapes = False
    def setUp( self ):
        self.model_tester = FlaxBigBirdModelTester(self )
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained( self ):
        super().test_from_pretrained_save_pretrained()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init( self ):
        super().test_from_pretrained_with_no_automatic_init()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init( self ):
        super().test_no_automatic_init()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output( self ):
        super().test_hidden_states_output()
    @slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base" )
            self.assertIsNotNone(model )
    def test_attention_outputs( self ):
        if self.test_attn_probs:
            super().test_attention_outputs()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def model_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model(input_ids=input_ids , attention_mask=attention_mask , **kwargs )
                with self.subTest("JIT Enabled" ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def check_pt_flax_outputs( self , fx_outputs , pt_outputs , model_class , tol=1e-5 , name="outputs" , attributes=None ):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions" ):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs , pt_outputs , model_class , tol , name , attributes )
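# Editor's note (sketch): the JIT test above follows the usual Flax pattern --
# the same jitted callable is run once normally and once under
# jax.disable_jit(), and the two tuples of outputs must agree shape-for-shape,
# which catches tracing-incompatible Python control flow in the model.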
| 714
|
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward( self , input_ids , token_type_ids , attention_mask ):
        return None
class FuncNonContiguousArgs:
    def forward( self , input_ids , some_other_args , token_type_ids , attention_mask ):
        return None
class OnnxExportTestCase( unittest.TestCase ):
    MODEL_TO_TEST = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def A_ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase , "tf" , 12 , **lowerCamelCase )
@require_torch
@slow
def A_ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase , "pt" , 12 , **lowerCamelCase )
@require_torch
@slow
def A_ ( self ):
from transformers import BertModel
snake_case__ = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
with NamedTemporaryFile(mode="w+t" ) as vocab_file:
vocab_file.write("\n".join(lowerCamelCase ) )
vocab_file.flush()
snake_case__ = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
snake_case__ = BertModel(BertConfig(vocab_size=len(lowerCamelCase ) ) )
model.save_pretrained(lowerCamelCase )
self._test_export(lowerCamelCase , "pt" , 12 , lowerCamelCase )
@require_tf
@slow
def A_ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
snake_case__ = self._test_export(lowerCamelCase , "tf" , 12 , **lowerCamelCase )
snake_case__ = quantize(Path(lowerCamelCase ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
@require_torch
@slow
def A_ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
snake_case__ = self._test_export(lowerCamelCase , "pt" , 12 , **lowerCamelCase )
snake_case__ = quantize(lowerCamelCase )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
    def _test_export( self , model , framework , opset , tokenizer=None , **model_kwargs ):
try:
# Compute path
with TemporaryDirectory() as tempdir:
snake_case__ = Path(lowerCamelCase ).joinpath("model.onnx" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase )
return path
except Exception as e:
self.fail(lowerCamelCase )
@require_torch
@require_tokenizers
@slow
def A_ ( self ):
from transformers import BertModel
snake_case__ = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
snake_case__ = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(lowerCamelCase , lowerCamelCase , "pt" )
@require_tf
@require_tokenizers
@slow
def A_ ( self ):
from transformers import TFBertModel
snake_case__ = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
snake_case__ = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(lowerCamelCase , lowerCamelCase , "tf" )
    def _test_infer_dynamic_axis( self , model , tokenizer , framework ):
snake_case__ = FeatureExtractionPipeline(lowerCamelCase , lowerCamelCase )
snake_case__ = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
snake_case__ , snake_case__ , snake_case__ , snake_case__ = infer_shapes(lowerCamelCase , lowerCamelCase )
# Assert all variables are present
self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , lowerCamelCase )
self.assertSequenceEqual(variable_names[3:] , lowerCamelCase )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} )
self.assertDictEqual(shapes["output_1"] , {0: "batch"} )
def A_ ( self ):
snake_case__ = ["input_ids", "attention_mask", "token_type_ids"]
snake_case__ = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
snake_case__ , snake_case__ = ensure_valid_input(FuncContiguousArgs() , lowerCamelCase , lowerCamelCase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowerCamelCase ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(lowerCamelCase ) , set(lowerCamelCase ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowerCamelCase , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
snake_case__ , snake_case__ = ensure_valid_input(FuncNonContiguousArgs() , lowerCamelCase , lowerCamelCase )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(lowerCamelCase ) , 1 )
self.assertEqual(len(lowerCamelCase ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["input_ids"] )
self.assertEqual(ordered_input_names[0] , "input_ids" )
def A_ ( self ):
snake_case__ = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" )
self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() )
| 530
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
"""facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}
class ConvNextV2Config( BackboneConfigMixin , PretrainedConfig ):
    model_type = '''convnextv2'''
    def __init__( self , num_channels=3 , patch_size=4 , num_stages=4 , hidden_sizes=None , depths=None , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1e-12 , drop_path_rate=0.0 , image_size=224 , out_features=None , out_indices=None , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
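# Editor's sketch (illustrative only) of backbone-style usage of the config
# defined above:
#   config = ConvNextV2Config(depths=[2, 2, 6, 2], out_features=["stage2", "stage4"])
#   config.stage_names  # -> ['stem', 'stage1', 'stage2', 'stage3', 'stage4']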
| 554
|
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester( TestCasePlus ):
    @slow
    @require_torch
    def test_finetune_bert2bert( self ):
        """simple docstring"""
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased" )
        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128
        train_dataset = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
        val_dataset = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
        train_dataset = train_dataset.select(range(32 ) )
        val_dataset = val_dataset.select(range(16 ) )
        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"] , padding="max_length" , truncation=True , max_length=512 )
            outputs = tokenizer(batch["highlights"] , padding="max_length" , truncation=True , max_length=128 )
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask
            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask
            assert all(len(x ) == 512 for x in inputs.input_ids )
            assert all(len(x ) == 128 for x in outputs.input_ids )
            return batch
        def _compute_metrics(pred ):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids , skip_special_tokens=True )
            label_str = tokenizer.batch_decode(labels_ids , skip_special_tokens=True )
            accuracy = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
            return {"accuracy": accuracy}
        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=["article", "highlights"] , )
        train_dataset.set_format(
            type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=["article", "highlights"] , )
        val_dataset.set_format(
            type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir , per_device_train_batch_size=batch_size , per_device_eval_batch_size=batch_size , predict_with_generate=True , evaluation_strategy="steps" , do_train=True , do_eval=True , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert , args=training_args , compute_metrics=_compute_metrics , train_dataset=train_dataset , eval_dataset=val_dataset , tokenizer=tokenizer , )
# start training
trainer.train()
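        # Editor's note (sketch): with predict_with_generate=True, Seq2SeqTrainer
        # runs model.generate() during evaluation, so _compute_metrics receives
        # generated token ids in pred.predictions (not logits), which is why they
        # can be passed straight to tokenizer.batch_decode above.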
| 554
| 1
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict( config , input_ids , attention_mask=None , head_mask=None ):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester :
    '''simple docstring'''
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = 'gelu'
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=99 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=20 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=16 , _UpperCAmelCase=16 , ):
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = seq_length
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = eos_token_id
lowerCAmelCase_ = pad_token_id
lowerCAmelCase_ = bos_token_id
lowerCAmelCase_ = embed_dim
lowerCAmelCase_ = word_embed_proj_dim
lowerCAmelCase_ = False
def lowercase__ ( self):
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
lowerCAmelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
lowerCAmelCase_ = tf.concat([input_ids, eos_tensor] , axis=1)
lowerCAmelCase_ = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=_UpperCAmelCase , **self.config_updates , )
lowerCAmelCase_ = prepare_opt_inputs_dict(_UpperCAmelCase , _UpperCAmelCase)
return config, inputs_dict
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase):
lowerCAmelCase_ = TFOPTModel(config=_UpperCAmelCase)
lowerCAmelCase_ = inputs_dict['''input_ids''']
lowerCAmelCase_ = input_ids[:1, :]
lowerCAmelCase_ = inputs_dict['''attention_mask'''][:1, :]
lowerCAmelCase_ = 1
# first forward pass
lowerCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase)
lowerCAmelCase_ , lowerCAmelCase_ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size)
        lowerCAmelCase_ = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.int8)
# append to next input_ids and
lowerCAmelCase_ = tf.concat([input_ids, next_tokens] , axis=-1)
lowerCAmelCase_ = tf.concat([attention_mask, next_attn_mask] , axis=-1)
lowerCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase)[0]
lowerCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase)[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])
# select random slice
lowerCAmelCase_ = int(ids_tensor((1,) , output_from_past.shape[-1]))
lowerCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx]
lowerCAmelCase_ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_UpperCAmelCase , _UpperCAmelCase , rtol=1E-3)
@require_tf
class UpperCamelCase_ ( A , A , unittest.TestCase ):
'''simple docstring'''
a :List[Any] = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
a :int = (TFOPTForCausalLM,) if is_tf_available() else ()
a :Dict = (
{'feature-extraction': TFOPTModel, 'text-generation': TFOPTForCausalLM} if is_tf_available() else {}
)
a :List[str] = False
a :Tuple = False
a :str = False
a :Tuple = 10
def lowercase__ ( self):
lowerCAmelCase_ = TFOPTModelTester(self)
lowerCAmelCase_ = ConfigTester(self , config_class=_UpperCAmelCase)
def lowercase__ ( self):
self.config_tester.run_common_tests()
def lowercase__ ( self):
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase)
def lowercase__ ( self):
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(_UpperCAmelCase , _UpperCAmelCase):
if hasattr(_UpperCAmelCase , '''weight'''):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(_UpperCAmelCase , '''weight'''):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
lowerCAmelCase_ = model_class(config=_UpperCAmelCase)
lowerCAmelCase_ = _get_word_embedding_weight(_UpperCAmelCase , model.get_input_embeddings())
lowerCAmelCase_ = _get_word_embedding_weight(_UpperCAmelCase , model.get_output_embeddings())
# reshape the embeddings
model.resize_token_embeddings(_UpperCAmelCase)
lowerCAmelCase_ = _get_word_embedding_weight(_UpperCAmelCase , model.get_input_embeddings())
lowerCAmelCase_ = _get_word_embedding_weight(_UpperCAmelCase , model.get_output_embeddings())
# check that the resized embeddings size matches the desired size.
lowerCAmelCase_ = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , _UpperCAmelCase)
# check that weights remain the same after resizing
lowerCAmelCase_ = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(pa - pa)) > 0:
lowerCAmelCase_ = False
self.assertTrue(_UpperCAmelCase)
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , _UpperCAmelCase)
lowerCAmelCase_ = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(pa - pa)) > 0:
lowerCAmelCase_ = False
self.assertTrue(_UpperCAmelCase)
def _long_tensor( tok_lst ):
    """simple docstring"""
    return tf.constant(tok_lst , dtype=tf.int32 )
@require_tf
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
a :Tuple = 99
def lowercase__ ( self):
        lowerCAmelCase_ = tf.ones((4, 1) , dtype=tf.int32) * 2
lowerCAmelCase_ = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3) + 3, eos_column_vector] , axis=1)
lowerCAmelCase_ = input_ids.shape[0]
lowerCAmelCase_ = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase__ ( self):
lowerCAmelCase_ = TFOPTModel.from_pretrained('''facebook/opt-350m''')
lowerCAmelCase_ = _long_tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
lowerCAmelCase_ = tf.not_equal(_UpperCAmelCase , model.config.pad_token_id)
with tf.GradientTape():
lowerCAmelCase_ = model(input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase).last_hidden_state
lowerCAmelCase_ = (1, 11, 512)
self.assertEqual(output.shape , _UpperCAmelCase)
lowerCAmelCase_ = tf.constant(
[[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]])
self.assertTrue(np.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=4E-3))
lowerCAmelCase_ = tf.function(_UpperCAmelCase , jit_compile=_UpperCAmelCase)
lowerCAmelCase_ = xla_generate(_UpperCAmelCase , _UpperCAmelCase)[0]
self.assertTrue(np.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=4E-2))
@require_tf
@slow
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self):
super().setUp()
lowerCAmelCase_ = '''facebook/opt-350m'''
def lowercase__ ( self):
lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(self.path_model)
lowerCAmelCase_ = GPTaTokenizer.from_pretrained(self.path_model)
lowerCAmelCase_ = [
'''Today is a beautiful day and I want to''',
'''In the city of''',
'''Paris is the capital of France and''',
'''Computers and mobile phones have taken''',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
lowerCAmelCase_ = tokenizer(_UpperCAmelCase , return_tensors='''tf''' , padding=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase)
lowerCAmelCase_ = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask)[0] , axis=-1)
lowerCAmelCase_ = tf.constant(
[
[1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
[-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
[0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
[6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
])
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-4))
lowerCAmelCase_ = tf.function(_UpperCAmelCase , jit_compile=_UpperCAmelCase)
lowerCAmelCase_ = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask)[0] , axis=-1)
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-4))
@require_tf
@slow
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@property
def lowercase__ ( self):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def lowercase__ ( self):
lowerCAmelCase_ = '''facebook/opt-125m'''
lowerCAmelCase_ = [
'''Today is a beautiful day and I want to''',
'''In the city of New York, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
lowerCAmelCase_ = []
lowerCAmelCase_ = GPTaTokenizer.from_pretrained(_UpperCAmelCase)
lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(_UpperCAmelCase)
for prompt in self.prompts:
lowerCAmelCase_ = tokenizer(_UpperCAmelCase , return_tensors='''tf''').input_ids
lowerCAmelCase_ = model.generate(_UpperCAmelCase , max_length=10)
lowerCAmelCase_ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase)
predicted_outputs += generated_string
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
def lowercase__ ( self):
lowerCAmelCase_ = '''facebook/opt-350m'''
lowerCAmelCase_ = GPTaTokenizer.from_pretrained(_UpperCAmelCase)
lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(_UpperCAmelCase)
lowerCAmelCase_ = '''left'''
# use different length sentences to test batching
lowerCAmelCase_ = [
'''Hello, my dog is a little''',
'''Today, I''',
]
lowerCAmelCase_ = tokenizer(_UpperCAmelCase , return_tensors='''tf''' , padding=_UpperCAmelCase)
lowerCAmelCase_ = inputs['''input_ids''']
lowerCAmelCase_ = model.generate(input_ids=_UpperCAmelCase , attention_mask=inputs['''attention_mask'''])
lowerCAmelCase_ = tokenizer(sentences[0] , return_tensors='''tf''').input_ids
lowerCAmelCase_ = model.generate(input_ids=_UpperCAmelCase)
lowerCAmelCase_ = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs['''attention_mask'''][-1] , tf.int32))
lowerCAmelCase_ = tokenizer(sentences[1] , return_tensors='''tf''').input_ids
lowerCAmelCase_ = model.generate(input_ids=_UpperCAmelCase , max_length=model.config.max_length - num_paddings)
lowerCAmelCase_ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase)
lowerCAmelCase_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_UpperCAmelCase)
lowerCAmelCase_ = tokenizer.decode(output_padded[0] , skip_special_tokens=_UpperCAmelCase)
lowerCAmelCase_ = [
'''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
'''Today, I was in the middle of a conversation with a friend about the''',
]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
self.assertListEqual(_UpperCAmelCase , [non_padded_sentence, padded_sentence])
    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]

        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
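
# A minimal sketch of why the batch test above sets `padding_side = "left"` for a
# decoder-only model (assumes `transformers` is installed; this is illustrative,
# not part of the original test suite):
#
#     from transformers import GPT2Tokenizer
#
#     tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-350m")
#     tokenizer.padding_side = "left"
#     batch = tokenizer(["Hello, my dog is a little", "Today, I"], padding=True)
#
# With left padding every sequence ends at the same position, so generation
# continues directly from the last real token; with right padding the model
# would be asked to continue from trailing <pad> tokens instead.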
| 713
|
__all__ = [
"DownloadConfig",
"DownloadManager",
"DownloadMode",
"StreamingDownloadManager",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 413
| 0
|
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class Image:
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode example into a format for Arrow storage."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode example image file into image data."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, return the feature itself, otherwise flatten into a bytes/path struct."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array to the Image arrow storage type ({"bytes": binary, "path": string})."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed the image files referenced by "path" into the Arrow array as bytes."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def _a ( lowercase__ : "PIL.Image.Image" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = BytesIO()
if image.format in list_image_compression_formats():
SCREAMING_SNAKE_CASE__ : int = image.format
else:
SCREAMING_SNAKE_CASE__ : Tuple = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
image.save(lowercase__ , format=lowercase__ )
return buffer.getvalue()
def _a ( lowercase__ : "PIL.Image.Image" ):
'''simple docstring'''
if hasattr(lowercase__ , 'filename' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(lowercase__ )}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype = np.dtype(dtype_byteorder + dtype_kind + str(dtype_itemsize))
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]],
) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
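
# A small round-trip sketch of the {"bytes", "path"} contract implemented above
# (assumes Pillow and numpy are installed; purely illustrative, not part of the
# library):
#
#     import numpy as np
#     import PIL.Image
#
#     arr = np.zeros((32, 32, 3), dtype=np.uint8)          # a black RGB image
#     encoded = encode_np_array(arr)                        # -> {"path": None, "bytes": b"\x89PNG..."}
#     decoded = PIL.Image.open(BytesIO(encoded["bytes"]))   # what decode_example does for in-memory bytes
#     assert decoded.size == (32, 32)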
| 85
|
import os


def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal (/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)
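
# Quick sanity check on a small square grid (illustrative; the scan above
# assumes an n x n grid). The largest product of four adjacent numbers here
# is the bottom row: 13 * 14 * 15 * 16 = 43680.
_sample_grid = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
    [9, 10, 11, 12],
    [13, 14, 15, 16],
]
assert largest_product(_sample_grid) == 43680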
if __name__ == "__main__":
print(solution())
| 404
| 0
|
"""simple docstring"""
from __future__ import annotations
class lowerCAmelCase :
'''simple docstring'''
def __init__( self :List[str] , lowerCamelCase_ :str , lowerCamelCase_ :str ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = text, pattern
UpperCamelCase__ , UpperCamelCase__ = len(lowerCAmelCase__ ), len(lowerCAmelCase__ )
def lowerCamelCase__ ( self :Union[str, Any] , lowerCamelCase_ :str ) -> int:
"""simple docstring"""
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def lowerCamelCase__ ( self :int , lowerCamelCase_ :int ) -> int:
"""simple docstring"""
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def lowerCamelCase__ ( self :List[Any] ) -> list[int]:
"""simple docstring"""
UpperCamelCase__ = []
for i in range(self.textLen - self.patLen + 1 ):
UpperCamelCase__ = self.mismatch_in_text(lowerCAmelCase__ )
if mismatch_index == -1:
positions.append(lowerCAmelCase__ )
else:
UpperCamelCase__ = self.match_in_pattern(self.text[mismatch_index] )
UpperCamelCase__ = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
A : Dict = 'ABAABA'
A : Optional[int] = 'AB'
A : List[Any] = BoyerMooreSearch(text, pattern)
A : Tuple = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
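
# Another quick check (illustrative): "GEEK" occurs in "GEEKSFORGEEKS" at
# indices 0 and 8, so the search should report both positions. Note that the
# shift assignment inside the for-loop above has no effect on the loop
# variable, so the scan effectively checks every window.
bms2 = BoyerMooreSearch("GEEKSFORGEEKS", "GEEK")
assert bms2.bad_character_heuristic() == [0, 8]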
| 701
|
"""simple docstring"""
def snake_case__ ( _snake_case : int , _snake_case : int , _snake_case : int ):
"""simple docstring"""
if exponent == 1:
return base
if exponent % 2 == 0:
UpperCamelCase__ = _modexpt(_snake_case , exponent // 2 , _snake_case ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(_snake_case , exponent - 1 , _snake_case )) % modulo_value
def snake_case__ ( _snake_case : int = 17_77 , _snake_case : int = 18_55 , _snake_case : int = 8 ):
"""simple docstring"""
UpperCamelCase__ = base
for _ in range(1 , _snake_case ):
UpperCamelCase__ = _modexpt(_snake_case , _snake_case , 10**digits )
return result
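
# Sanity check of the helper against Python's built-in three-argument pow
# (illustrative): both compute modular exponentiation.
assert _modexpt(1777, 1855, 10**8) == pow(1777, 1855, 10**8)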
if __name__ == "__main__":
print(F"{solution() = }")
| 304
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
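
# A minimal sketch of the lazy-import idea used above (illustrative, not the
# real `_LazyModule`): attribute access triggers the actual submodule import,
# so heavy backends like torch or TF are only loaded when first used.
#
#     import importlib
#
#     class LazyModule:
#         def __init__(self, name, import_structure):
#             self._name = name
#             # map each exported symbol to the submodule that defines it
#             self._symbol_to_module = {
#                 sym: mod for mod, syms in import_structure.items() for sym in syms
#             }
#
#         def __getattr__(self, symbol):
#             module = importlib.import_module(f"{self._name}.{self._symbol_to_module[symbol]}")
#             return getattr(module, symbol)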
| 217
|
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def prepare_img():
    """Load an image of cute cats to verify the converted model's outputs on."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='mobilenet_v1_1.0_224',
type=str,
help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.',
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
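
# Hypothetical programmatic use of the converter (the paths below are
# placeholders, not real files):
#
#     convert_movilevit_checkpoint(
#         model_name="mobilenet_v1_1.0_224",
#         checkpoint_path="/path/to/mobilenet_v1_1.0_224.ckpt",
#         pytorch_dump_folder_path="/path/to/output",
#         push_to_hub=False,
#     )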
| 217
| 1
|
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    # Check if the input is valid
    if not (len(equation1) == len(equation2) == 3):
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
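
# Worked example: solve x + 2y = 5 and 3x + 4y = 11. The determinant is
# 1*4 - 3*2 = -2, determinant_x = 5*4 - 11*2 = -2 and determinant_y =
# 1*11 - 3*5 = -4, giving the unique solution (1.0, 2.0).
assert cramers_rule_2x2([1, 2, 5], [3, 4, 11]) == (1.0, 2.0)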
| 245
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
| 245
| 1
|
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
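
# Hypothetical usage of the class decorator above: apply `slow` to every test
# method of a test case in one go (the class below is illustrative only).
#
#     @for_all_test_methods(slow)
#     class MyDatasetIntegrationTests(unittest.TestCase):
#         def test_download(self):
#             ...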
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result
def pytest_xdist_worker_id():
    """Return the numeric id of the current `pytest-xdist` worker ("gw0" -> 0 when not distributed)."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """Return a port unique to this xdist worker, so distributed tests don't collide on the same port."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
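
if __name__ == "__main__":
    # Smoke test of the async runner above (illustrative): run a Python
    # one-liner and check its captured stdout lines.
    result = execute_subprocess_async([sys.executable, "-c", "print('ok')"])
    assert result.stdout == ["ok"]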
| 420
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""MLukeTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 420
| 1
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 81
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
    return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
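
if __name__ == "__main__":
    # A minimal construction sketch (the values below are illustrative, not
    # the settings of any released ESM checkpoint):
    config = EsmConfig(vocab_size=33, mask_token_id=32, pad_token_id=1)
    print(config.hidden_size)  # 768 by default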
| 81
| 1
|
"""simple docstring"""
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> float:
"""simple docstring"""
return round(float(moles / volume ) * nfactor )
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> float:
"""simple docstring"""
return round(float((moles * 0.08_21 * temperature) / (volume) ) )
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> float:
"""simple docstring"""
return round(float((moles * 0.08_21 * temperature) / (pressure) ) )
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> float:
"""simple docstring"""
return round(float((pressure * volume) / (0.08_21 * moles) ) )
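
# Worked example: 3 moles of an ideal gas at 300 K in a 0.82 L vessel exerts
# roughly 3 * 0.0821 * 300 / 0.82 ≈ 90 atm (the helpers round to the nearest
# integer).
assert moles_to_pressure(volume=0.82, moles=3, temperature=300) == 90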
if __name__ == "__main__":
import doctest
doctest.testmod()
| 610
|
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,  # pad inputs to max length with silence token (zero) and no attention mask
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        """Compute the log-mel spectrogram of the provided audio, clipped and normalized like the original Whisper."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """Every array in the list is normalized to have zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
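
if __name__ == "__main__":
    # Quick feature-extraction sketch on one second of silence (illustrative):
    # the extractor pads to its 30 s window, so the expected output shape is
    # (1, 80, 3000) — 80 mel bins over 3000 frames.
    extractor = WhisperFeatureExtractor()
    features = extractor(np.zeros(16000, dtype=np.float32), sampling_rate=16000, return_tensors="np")
    print(features["input_features"].shape)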
| 610
| 1
|
def find_min(arr):
    """Return the minimum difference between the sums of two subsets partitioning `arr`."""
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]

    for i in range(1, n + 1):
        dp[i][0] = True

    for i in range(1, s + 1):
        dp[0][i] = False

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # carry over the "don't take element i" case
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    diff = 0
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break

    return diff
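
# Example: [1, 6, 11, 5] splits into {1, 5, 6} and {11}, so the minimum
# difference between the two subset sums is |12 - 11| = 1.
assert find_min([1, 6, 11, 5]) == 1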
| 166
|
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
    from transformers import (
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelForTableQuestionAnswering,
        TFAutoModelForTokenClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFFunnelBaseModel,
        TFFunnelModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
        TFTapasForQuestionAnswering,
    )
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCamelCase ( lowercase ):
lowerCamelCase__: Any = '''new-model'''
if is_tf_available():
class __lowerCamelCase ( lowercase ):
lowerCamelCase__: int = NewModelConfig
@require_tf
class __lowerCamelCase ( unittest.TestCase ):
@slow
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase: int = "bert-base-cased"
UpperCAmelCase: Optional[int] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
UpperCAmelCase: int = TFAutoModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase: Optional[int] = "bert-base-cased"
UpperCAmelCase: Any = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
UpperCAmelCase: Tuple = TFAutoModelForPreTraining.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase: str = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
UpperCAmelCase: List[Any] = TFAutoModelForCausalLM.from_pretrained(__snake_case )
UpperCAmelCase , UpperCAmelCase: Optional[Any] = TFAutoModelForCausalLM.from_pretrained(__snake_case , output_loading_info=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def A__ ( self ) -> str:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase: List[str] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
UpperCAmelCase: List[str] = TFAutoModelWithLMHead.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def A__ ( self ) -> str:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase: List[Any] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
UpperCAmelCase: Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(__snake_case )
UpperCAmelCase , UpperCAmelCase: Any = TFAutoModelForMaskedLM.from_pretrained(__snake_case , output_loading_info=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def A__ ( self ) -> str:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase: str = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
UpperCAmelCase: int = TFAutoModelForSeqaSeqLM.from_pretrained(__snake_case )
UpperCAmelCase , UpperCAmelCase: str = TFAutoModelForSeqaSeqLM.from_pretrained(__snake_case , output_loading_info=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def A__ ( self ) -> Dict:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
UpperCAmelCase: str = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
UpperCAmelCase: Dict = TFAutoModelForSequenceClassification.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
UpperCAmelCase: Union[str, Any] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
UpperCAmelCase: List[Any] = TFAutoModelForQuestionAnswering.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
@require_tensorflow_probability
def A__ ( self ) -> Any:
"""simple docstring"""
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
UpperCAmelCase: Union[str, Any] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
UpperCAmelCase: Any = TFAutoModelForTableQuestionAnswering.from_pretrained(__snake_case )
UpperCAmelCase , UpperCAmelCase: Optional[Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(
__snake_case , output_loading_info=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase: Optional[Any] = TFAutoModelWithLMHead.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_4_4_1_0 )
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase: Any = TFAutoModelWithLMHead.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_4_4_1_0 )
def A__ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase: int = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny" )
self.assertIsInstance(__snake_case , __snake_case )
UpperCAmelCase: int = copy.deepcopy(model.config )
UpperCAmelCase: int = ["FunnelBaseModel"]
UpperCAmelCase: Optional[Any] = TFAutoModel.from_config(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__snake_case )
UpperCAmelCase: Dict = TFAutoModel.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
try:
AutoConfig.register("new-model" , __snake_case )
UpperCAmelCase: Tuple = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(__snake_case ):
auto_class.register(__snake_case , __snake_case )
auto_class.register(__snake_case , __snake_case )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__snake_case ):
auto_class.register(__snake_case , __snake_case )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase: Dict = BertModelTester(self ).get_config()
UpperCAmelCase: str = NewModelConfig(**tiny_config.to_dict() )
UpperCAmelCase: Tuple = auto_class.from_config(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__snake_case )
UpperCAmelCase: Dict = auto_class.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def A__ ( self ) -> Tuple:
"""simple docstring"""
with self.assertRaisesRegex(
__snake_case , "bert-base is not a local folder and is not a valid model identifier" ):
UpperCAmelCase: Union[str, Any] = TFAutoModel.from_pretrained("bert-base" )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
with self.assertRaisesRegex(
__snake_case , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
UpperCAmelCase: int = TFAutoModel.from_pretrained(__snake_case , revision="aaaaaa" )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
with self.assertRaisesRegex(
__snake_case , "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin" , ):
UpperCAmelCase: Union[str, Any] = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
with self.assertRaisesRegex(__snake_case , "Use `from_pt=True` to load this model" ):
UpperCAmelCase: Any = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase: str = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
UpperCAmelCase: List[Any] = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
UpperCAmelCase: str = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
with RequestCounter() as counter:
UpperCAmelCase: int = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
import json
import os
import unittest

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
import importlib
import inspect
import os
import re


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")


CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfig",
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
}


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
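# A small usage sketch. The dataset name and the `prepare_for_task` call are
# assumptions based on the `datasets` task-template API, not part of this file:
#
#     from datasets import load_dataset
#
#     task = QuestionAnsweringExtractive(
#         question_column="question", context_column="context", answers_column="answers"
#     )
#     squad = load_dataset("squad", split="train")
#     squad = squad.prepare_for_task(task)  # renames columns per `column_mapping`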
a__ : Optional[Any] =[
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
a__ : List[str] =[
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
a__ : int =[
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
a__ : str =[
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
a__ : Union[str, Any] =[
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
a__ : int =[
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
a__ : Any =[
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
a__ : Any =[
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain ``n`` digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
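# A quick worked check (counting from F(1) = F(2) = 1):
#
#     >>> solution(3)
#     12
#
# because F(12) = 144 is the first Fibonacci number with three digits.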
from __future__ import annotations

from collections.abc import Iterator
from typing import Generic, TypeVar

T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data  # the value stored in the node
        self.next: Node[T] | None = None  # link to the node below in the stack

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """Stack implemented on top of a singly linked list."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
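# A quick usage sketch of LinkedStack (values are illustrative):
#
#     >>> stack = LinkedStack[int]()
#     >>> stack.push(1)
#     >>> stack.push(2)
#     >>> str(stack)
#     '2->1'
#     >>> stack.pop()
#     2
#     >>> stack.peek()
#     1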
import argparse
import json
import subprocess


def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()
    get_runner_status(args.target_runners, args.token)
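# Example invocation (the script path, runner names, and token are placeholders):
#
#     python check_offline_runners.py --target_runners runner-1,runner-2 --token <github-token>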
from typing import Any


def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most likely sequence of hidden states for the observations."""
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]

    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")


if __name__ == "__main__":
    from doctest import testmod

    testmod()
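# A small usage sketch (the standard healthy/sick HMM example; all numbers are
# illustrative):
#
#     >>> observations = ["normal", "cold", "dizzy"]
#     >>> states = ["healthy", "sick"]
#     >>> start_p = {"healthy": 0.6, "sick": 0.4}
#     >>> trans_p = {
#     ...     "healthy": {"healthy": 0.7, "sick": 0.3},
#     ...     "sick": {"healthy": 0.4, "sick": 0.6},
#     ... }
#     >>> emit_p = {
#     ...     "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#     ...     "sick": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
#     ... }
#     >>> viterbi(observations, states, start_p, trans_p, emit_p)
#     ['healthy', 'healthy', 'sick']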