"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
UpperCAmelCase : Dict = logging.get_logger(__name__)
UpperCAmelCase : Tuple = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
UpperCAmelCase : str = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
UpperCAmelCase : Dict = {
'allenai/led-base-16384': 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode strings, avoiding the whitespace/control
    characters that byte-level BPE chokes on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
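

# A quick sketch of what the byte encoder produces (values worked out by hand, not from the
# original file): printable characters map to themselves, while unmappable bytes are shifted
# into the 256+ range so every byte gets a visible character.
#
#   >>> byte_encoder = bytes_to_unicode()
#   >>> byte_encoder[ord("a")]
#   'a'
#   >>> byte_encoder[0]  # control byte 0x00 is remapped to a printable character
#   'Ā'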


# Copied from transformers.models.bart.tokenization_bart.get_pairs
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, where a word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
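

# For example (illustrative only; set ordering is arbitrary):
#   >>> get_pairs(("h", "e", "l", "l", "o"))
#   {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}
# BPE repeatedly merges the lowest-ranked of these pairs until no known merge remains.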


class LEDTokenizer(PreTrainedTokenizer):
    """
    Constructs a LED tokenizer, which uses byte-level Byte-Pair-Encoding (derived from the
    GPT-2/BART tokenizer).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply the cached byte-pair-encoding merges to a single pre-tokenized token."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs from a sequence or a pair: `<s> A </s>` or `<s> A </s></s> B </s>`."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """LED does not use token type ids, so a list of zeros is returned."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
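

# Minimal usage sketch (not from the original file; assumes the checkpoint above is reachable):
#
#   from transformers import LEDTokenizer
#   tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
#   batch = tokenizer(["A long document ..."], return_tensors="pt", padding=True)
#   # LED additionally consumes a global attention mask; 1 marks globally attending tokens
#   batch["global_attention_mask"] = [[1] + [0] * (len(batch["input_ids"][0]) - 1)]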
"""simple docstring"""
from collections.abc import Sequence
def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float:
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(_UpperCamelCase ) )
def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float:
'''simple docstring'''
__UpperCAmelCase : Dict = 0.0
for coeff in reversed(_UpperCamelCase ):
__UpperCAmelCase : Any = result * x + coeff
return result
if __name__ == "__main__":
UpperCAmelCase : str = (0.0, 0.0, 5.0, 9.3, 7.0)
UpperCAmelCase : str = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
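
# Why Horner pays off, worked by hand (the numbers are mine, not from the original file):
# Horner rewrites 5x^2 + 9.3x^3 + 7x^4 as ((((7.0)x + 9.3)x + 5.0)x + 0.0)x + 0.0,
# so at x = 10.0 the running result is 7.0 -> 79.3 -> 798.0 -> 7980.0 -> 79800.0,
# using n multiply-adds instead of recomputing every power of x.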
"""simple docstring"""
import argparse
import json
import subprocess
def lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : int ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : str = []
__UpperCAmelCase : Any = (
f'''curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'''
""" https://api.github.com/repos/huggingface/transformers/actions/runners"""
)
__UpperCAmelCase : List[Any] = subprocess.run(_UpperCamelCase , shell=_UpperCamelCase , stdout=subprocess.PIPE )
__UpperCAmelCase : Optional[int] = output.stdout.decode("""utf-8""" )
__UpperCAmelCase : Dict = json.loads(_UpperCamelCase )
__UpperCAmelCase : int = status["""runners"""]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_UpperCamelCase )
# save the result so we can report them on Slack
with open("""offline_runners.txt""" , """w""" ) as fp:
fp.write(json.dumps(_UpperCamelCase ) )
if len(_UpperCamelCase ) > 0:
__UpperCAmelCase : Tuple = """\n""".join([x["""name"""] for x in offline_runners] )
raise ValueError(f'''The following runners are offline:\n{failed}''' )
if __name__ == "__main__":
def lowerCamelCase ( _UpperCamelCase : List[str] ) -> Optional[int]:
'''simple docstring'''
return values.split(""",""" )
UpperCAmelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
UpperCAmelCase : Union[str, Any] = parser.parse_args()
get_runner_status(args.target_runners, args.token)
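
# Hypothetical invocation (script name, runner names and token are placeholders):
#   python check_self_hosted_runner.py --target_runners gpu-runner-1,gpu-runner-2 --token <GITHUB_TOKEN>
# The script writes offline_runners.txt and raises if any targeted runner is offline.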
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase : Optional[int] = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class lowerCamelCase__ :
"""simple docstring"""
__a = PegasusConfig
__a = {}
__a = """gelu"""
def __init__( self : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Dict=True , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Union[str, Any]=32 , UpperCamelCase : Union[str, Any]=5 , UpperCamelCase : Any=4 , UpperCamelCase : Tuple=37 , UpperCamelCase : Any=0.1 , UpperCamelCase : Any=0.1 , UpperCamelCase : Union[str, Any]=20 , UpperCamelCase : List[str]=2 , UpperCamelCase : int=1 , UpperCamelCase : Optional[Any]=0 , ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : str = batch_size
__UpperCAmelCase : Optional[Any] = seq_length
__UpperCAmelCase : Dict = is_training
__UpperCAmelCase : Dict = use_labels
__UpperCAmelCase : List[Any] = vocab_size
__UpperCAmelCase : Dict = hidden_size
__UpperCAmelCase : Optional[Any] = num_hidden_layers
__UpperCAmelCase : Union[str, Any] = num_attention_heads
__UpperCAmelCase : List[Any] = intermediate_size
__UpperCAmelCase : Union[str, Any] = hidden_dropout_prob
__UpperCAmelCase : List[str] = attention_probs_dropout_prob
__UpperCAmelCase : List[Any] = max_position_embeddings
__UpperCAmelCase : Any = eos_token_id
__UpperCAmelCase : Optional[int] = pad_token_id
__UpperCAmelCase : List[str] = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }


@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
        tgt_text = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
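
# To exercise just the cache tests above, a typical (hypothetical) invocation would be:
#   python -m pytest tests/models/pegasus/test_modeling_flax_pegasus.py -k "use_cache"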
"""simple docstring"""
import math
import sys
def lowerCamelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
if number != int(_UpperCamelCase ):
raise ValueError("""the value of input must be a natural number""" )
if number < 0:
raise ValueError("""the value of input must not be a negative number""" )
if number == 0:
return 1
__UpperCAmelCase : Optional[Any] = [-1] * (number + 1)
__UpperCAmelCase : List[str] = 0
for i in range(1 , number + 1 ):
__UpperCAmelCase : Union[str, Any] = sys.maxsize
__UpperCAmelCase : Union[str, Any] = int(math.sqrt(_UpperCamelCase ) )
for j in range(1 , root + 1 ):
__UpperCAmelCase : Dict = 1 + answers[i - (j**2)]
__UpperCAmelCase : Optional[int] = min(_UpperCamelCase , _UpperCamelCase )
__UpperCAmelCase : Optional[int] = answer
return answers[number]
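

# Worked check (my numbers; consistent with Lagrange's four-square theorem, which caps
# every answer at 4):
#   12 = 4 + 4 + 4  ->  minimum_squares_to_represent_a_number(12) == 3
#   13 = 9 + 4      ->  minimum_squares_to_represent_a_number(13) == 2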
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase : List[str] = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
    "b0": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.0,
        "image_size": 224,
        "dropout_rate": 0.2,
        "dw_padding": [],
    },
    "b1": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.1,
        "image_size": 240,
        "dropout_rate": 0.2,
        "dw_padding": [16],
    },
    "b2": {
        "hidden_dim": 1408,
        "width_coef": 1.1,
        "depth_coef": 1.2,
        "image_size": 260,
        "dropout_rate": 0.3,
        "dw_padding": [5, 8, 16],
    },
    "b3": {
        "hidden_dim": 1536,
        "width_coef": 1.2,
        "depth_coef": 1.4,
        "image_size": 300,
        "dropout_rate": 0.3,
        "dw_padding": [5, 18],
    },
    "b4": {
        "hidden_dim": 1792,
        "width_coef": 1.4,
        "depth_coef": 1.8,
        "image_size": 380,
        "dropout_rate": 0.4,
        "dw_padding": [6],
    },
    "b5": {
        "hidden_dim": 2048,
        "width_coef": 1.6,
        "depth_coef": 2.2,
        "image_size": 456,
        "dropout_rate": 0.4,
        "dw_padding": [13, 27],
    },
    "b6": {
        "hidden_dim": 2304,
        "width_coef": 1.8,
        "depth_coef": 2.6,
        "image_size": 528,
        "dropout_rate": 0.5,
        "dw_padding": [31],
    },
    "b7": {
        "hidden_dim": 2560,
        "width_coef": 2.0,
        "depth_coef": 3.1,
        "image_size": 600,
        "dropout_rate": 0.5,
        "dw_padding": [18],
    },
}


def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config


def prepare_img():
    """Load the standard COCO test image used to verify conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor


def rename_keys(original_param_names):
    """Build the (TF parameter name -> HF parameter name) mapping for this checkpoint."""
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
    rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight"))
    rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight"))
    rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias"))
    rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean"))
    rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var"))

    for b in block_names:
        hf_b = block_name_mapping[b]
        rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight"))
        rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight"))
        rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias"))
        rename_keys.append(
            (f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean")
        )
        rename_keys.append(
            (f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var")
        )
        rename_keys.append(
            (f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight")
        )
        rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight"))
        rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias"))
        rename_keys.append(
            (f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean")
        )
        rename_keys.append(
            (f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var")
        )
        rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight"))
        rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias"))
        rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight"))
        rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias"))
        rename_keys.append(
            (f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight")
        )
        rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight"))
        rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias"))
        rename_keys.append(
            (f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean")
        )
        rename_keys.append(
            (f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var")
        )

    rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight"))
    rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight"))
    rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias"))
    rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean"))
    rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var"))

    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    # Classification head (TF key names assumed from the Keras EfficientNet top layer)
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping


def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)


@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """Copy/paste/tweak the original model's weights into our EfficientNet structure."""
    # Load the original TF/Keras model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        repo_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(repo_name)
        hf_model.push_to_hub(repo_name)
if __name__ == "__main__":
UpperCAmelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
UpperCAmelCase : Any = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
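
# Example invocation (script filename is assumed; requires TF, PyTorch, and hub access):
#   python convert_efficientnet_to_pytorch.py --model_name b0 --pytorch_dump_folder_path hf_model --save_model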
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
UpperCAmelCase : Tuple = None
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
UpperCAmelCase : str = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase : int = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
'tokenizer_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
},
}
UpperCAmelCase : List[str] = {
'google/rembert': 256,
}
UpperCAmelCase : List[Any] = '▁'


class RemBertTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" RemBert tokenizer backed by HuggingFace's tokenizers library, based on
    SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs from sequences: `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create token type ids: 0s for the first sequence, 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
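

# Minimal usage sketch (not from the original file; assumes the checkpoint is reachable):
#
#   from transformers import RemBertTokenizerFast
#   tokenizer = RemBertTokenizerFast.from_pretrained("google/rembert")
#   tokenizer("Hello world")["input_ids"]  # -> [cls_id, ..., sep_id]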
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCamelCase__ ( metaclass=A ):
"""simple docstring"""
__a = ["""keras_nlp"""]
def __init__( self : str , *UpperCamelCase : List[Any] , **UpperCamelCase : Dict ):
'''simple docstring'''
requires_backends(self , ["""keras_nlp"""] )
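

# Behavior sketch: with `keras_nlp` absent, any use fails fast with an import hint, e.g.
#   TFGPT2Tokenizer()  # raises ImportError asking you to install keras_nlp
# (exact message text depends on `requires_backends`).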
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
UpperCAmelCase : Optional[Any] = get_logger(__name__)
UpperCAmelCase : Tuple = R'\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n'


class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers applied during generation with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsProcessorList(list):
    """
    A list of `FlaxLogitsProcessor`/`FlaxLogitsWarper` objects, applied in order to the `scores`
    tensor via this class's `__call__` method.
    """

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores


class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    """`FlaxLogitsWarper` for temperature scaling (exponential scaling of the output distribution)."""

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
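

# Effect in numbers (hand-computed, approximate): for logits [1.0, 2.0],
#   softmax([1, 2])        ~ [0.27, 0.73]
#   softmax([1, 2] / 0.5)  ~ [0.12, 0.88]   (temperature < 1 sharpens)
#   softmax([1, 2] / 2.0)  ~ [0.38, 0.62]   (temperature > 1 flattens)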


class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """
    `FlaxLogitsWarper` that performs nucleus (top-p) filtering: keeps the smallest set of top tokens
    whose cumulative probability reaches `top_p`, masking the rest with `filter_value`.
    """

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
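

# Worked sketch of the mask above (my numbers): with sorted probs [0.5, 0.3, 0.15, 0.05]
# and top_p = 0.9, the cumulative sums are [0.5, 0.8, 0.95, 1.0]; `cumulative_probs < top_p`
# gives [T, T, F, F], and the roll-plus-first-column trick shifts it to [T, T, T, F], so the
# token that crosses the 0.9 boundary survives and only the tail is masked.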


class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    """`FlaxLogitsWarper` that keeps only the `top_k` highest-probability tokens."""

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores


class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """`FlaxLogitsProcessor` that forces the specified token as the first generated token."""

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """`FlaxLogitsProcessor` that forces the specified token as the last one when `max_length` is reached."""

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores


class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    """`FlaxLogitsProcessor` enforcing a minimum length by zeroing the EOS probability before it."""

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    """`FlaxLogitsProcessor` suppressing a list of tokens as soon as generation starts (at `begin_index`)."""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len: int):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    """`FlaxLogitsProcessor` suppressing a list of tokens at every generation step."""

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores
class lowerCamelCase__ ( A ):
"""simple docstring"""
def __init__( self : Any , UpperCamelCase : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = dict(UpperCamelCase )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
__UpperCAmelCase : str = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
__UpperCAmelCase : Optional[Any] = force_token_array.at[index].set(UpperCamelCase )
        __UpperCAmelCase : Optional[int] = jnp.int32(UpperCamelCase )
def __call__( self : Tuple , UpperCamelCase : jnp.ndarray , UpperCamelCase : jnp.ndarray , UpperCamelCase : int ):
'''simple docstring'''
def _force_token(UpperCamelCase : Optional[int] ):
__UpperCAmelCase : Tuple = scores.shape[0]
__UpperCAmelCase : Optional[int] = self.force_token_array[generation_idx]
__UpperCAmelCase : Tuple = jnp.ones_like(UpperCamelCase , dtype=scores.dtype ) * -float("""inf""" )
__UpperCAmelCase : Union[str, Any] = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
__UpperCAmelCase : Tuple = lax.dynamic_update_slice(UpperCamelCase , UpperCamelCase , (0, current_token) )
return new_scores
__UpperCAmelCase : str = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(UpperCamelCase ) , lambda: scores , ) , )
return scores
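# An illustrative sketch of the XLA-friendly forcing used in _force_token above:
# lax.dynamic_update_slice writes a single zero-logit column into an all--inf
# matrix, so argmax must select `token_id` without Python-level branching.
import jax.numpy as jnp
from jax import lax

def force_single_token(scores, token_id):
    batch_size = scores.shape[0]
    new_scores = jnp.ones_like(scores) * -float("inf")
    updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
    return lax.dynamic_update_slice(new_scores, updates, (0, token_id))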
class lowerCamelCase__ ( A ):
"""simple docstring"""
def __init__( self : Any , UpperCamelCase : str , UpperCamelCase : int , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Tuple = generate_config.eos_token_id
__UpperCAmelCase : Optional[Any] = generate_config.no_timestamps_token_id
__UpperCAmelCase : str = generate_config.no_timestamps_token_id + 1
__UpperCAmelCase : Dict = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(UpperCamelCase , """max_initial_timestamp_index""" ):
__UpperCAmelCase : Optional[Any] = generate_config.max_initial_timestamp_index
else:
__UpperCAmelCase : int = model_config.vocab_size
if self.max_initial_timestamp_index is None:
__UpperCAmelCase : Union[str, Any] = model_config.vocab_size
def __call__( self : Optional[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : str , UpperCamelCase : int ):
'''simple docstring'''
__UpperCAmelCase : List[str] = scores.at[:, self.no_timestamps_token_id].set(-float("""inf""" ) )
def handle_pairs(UpperCamelCase : int , UpperCamelCase : Union[str, Any] ):
__UpperCAmelCase : Optional[Any] = jnp.where((cur_len - self.begin_index) >= 1 , UpperCamelCase , UpperCamelCase )
            __UpperCAmelCase : str = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin , last_was_timestamp , UpperCamelCase , )
__UpperCAmelCase : Union[str, Any] = jnp.where((cur_len - self.begin_index) < 2 , UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : Optional[Any] = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , UpperCamelCase , UpperCamelCase , )
return jnp.where(
UpperCamelCase , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("""inf""" ) ) , scores_k.at[: self.eos_token_id].set(-float("""inf""" ) ) , ) , UpperCamelCase , )
__UpperCAmelCase : List[str] = jax.vmap(UpperCamelCase )(UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : Optional[Any] = jnp.where(cur_len == self.begin_index , UpperCamelCase , UpperCamelCase )
        __UpperCAmelCase : Union[str, Any] = jnp.where(
            self.max_initial_timestamp_index is not None , apply_max_initial_timestamp , UpperCamelCase , )
__UpperCAmelCase : str = self.timestamp_begin + self.max_initial_timestamp_index
__UpperCAmelCase : Tuple = jnp.where(
UpperCamelCase , scores.at[:, last_allowed + 1 :].set(-float("""inf""" ) ) , UpperCamelCase , )
# if sum of probability over timestamps is above any other token, sample timestamp
__UpperCAmelCase : Union[str, Any] = jax.nn.log_softmax(UpperCamelCase , axis=-1 )
def handle_cumulative_probs(UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict ):
__UpperCAmelCase : Dict = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
__UpperCAmelCase : Optional[Any] = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("""inf""" ) ) , UpperCamelCase , )
__UpperCAmelCase : Tuple = jax.vmap(UpperCamelCase )(UpperCamelCase , UpperCamelCase )
return scores
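# A simplified, illustrative sketch of the "timestamps win" rule above: when the
# summed log-probability of all timestamp tokens beats the best text token, the
# text tokens are masked so the model is forced to emit a timestamp.
import jax
import jax.numpy as jnp

def prefer_timestamps(scores_row, timestamp_begin):
    logprobs = jax.nn.log_softmax(scores_row)
    timestamp_logprob = jax.nn.logsumexp(logprobs[timestamp_begin:])
    max_text_logprob = jnp.max(logprobs[:timestamp_begin])
    return jnp.where(
        timestamp_logprob > max_text_logprob,
        scores_row.at[:timestamp_begin].set(-float("inf")),
        scores_row,
    )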
"""simple docstring"""
UpperCAmelCase : Dict = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def lowerCamelCase ( _UpperCamelCase : bytes ) -> bytes:
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
__UpperCAmelCase : Any = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(_UpperCamelCase )
__UpperCAmelCase : str = """""".join(bin(_UpperCamelCase )[2:].zfill(8 ) for byte in data )
__UpperCAmelCase : int = len(_UpperCamelCase ) % 6 != 0
if padding_needed:
# The padding that will be added later
__UpperCAmelCase : Dict = b"""=""" * ((6 - len(_UpperCamelCase ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(_UpperCamelCase ) % 6)
else:
__UpperCAmelCase : List[str] = b""""""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(_UpperCamelCase ) , 6 ) ).encode()
+ padding
)
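# Worked example of the regrouping performed above (RFC 4648): b"Man" encodes to
# "TWFu" because the 24 input bits 01001101 01100001 01101110 regroup into the
# four 6-bit values 19, 22, 5 and 46, which index B64_CHARSET as follows.
assert "".join(B64_CHARSET[i] for i in (19, 22, 5, 46)) == "TWFu"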
def lowerCamelCase ( _UpperCamelCase : str ) -> bytes:
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ) and not isinstance(_UpperCamelCase , _UpperCamelCase ):
__UpperCAmelCase : Tuple = (
"""argument should be a bytes-like object or ASCII string, """
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(_UpperCamelCase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_UpperCamelCase , _UpperCamelCase ):
try:
__UpperCAmelCase : Optional[Any] = encoded_data.decode("""utf-8""" )
except UnicodeDecodeError:
raise ValueError("""base64 encoded data should only contain ASCII characters""" )
__UpperCAmelCase : str = encoded_data.count("""=""" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(_UpperCamelCase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
__UpperCAmelCase : List[str] = encoded_data[:-padding]
__UpperCAmelCase : int = """""".join(
bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
__UpperCAmelCase : Optional[Any] = """""".join(
bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )
__UpperCAmelCase : List[Any] = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(_UpperCamelCase ) , 8 )
]
return bytes(_UpperCamelCase )
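# Complementary worked example: decoding reverses the regrouping and drops two
# padding bits per "=", so "TWE=" (values 19, 22, 4 -> bits 010011 010110 0001)
# recovers the 16 bits 01001101 01100001, i.e. b"Ma".
assert bytes(int("0100110101100001"[i : i + 8], 2) for i in range(0, 16, 8)) == b"Ma"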
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
def lowerCamelCase ( _UpperCamelCase : int = 5_0 ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[Any] = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
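# Note on the recurrence above: ways_number[n] counts the tilings of a length-n
# row with red blocks of length >= 3 separated by at least one grey square, as
# in Project Euler problem 114, whose statement gives 17 ways for a row of
# length 7.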
if __name__ == "__main__":
print(F"{solution() = }")
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
UpperCAmelCase : str = logging.get_logger(__name__)
class lowerCamelCase__ ( A ):
"""simple docstring"""
def __init__( self : Optional[Any] , *UpperCamelCase : str , **UpperCamelCase : List[str] ):
'''simple docstring'''
warnings.warn(
"""The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use ChineseCLIPImageProcessor instead.""" , UpperCamelCase , )
super().__init__(*UpperCamelCase , **UpperCamelCase )
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
UpperCAmelCase : str = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class lowerCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , UpperCamelCase : str ):
'''simple docstring'''
super().__init__()
        __UpperCAmelCase : Union[str, Any] = torchvision.models.resnet152(pretrained=UpperCamelCase )
__UpperCAmelCase : int = list(model.children() )[:-2]
__UpperCAmelCase : List[Any] = nn.Sequential(*UpperCamelCase )
        __UpperCAmelCase : str = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )
def lowerCamelCase__ ( self : Dict , UpperCamelCase : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.pool(self.model(UpperCamelCase ) )
__UpperCAmelCase : List[Any] = torch.flatten(UpperCamelCase , start_dim=2 )
__UpperCAmelCase : Any = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
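# Illustrative shape walk-through, assuming the usual 224x224 inputs: the ResNet
# trunk yields B x 2048 x 7 x 7 feature maps, and adaptive pooling to one of the
# POOLING_BREAKDOWN grids turns them into B x num_image_embeds x 2048 tokens.
import torch

_pooled = torch.nn.AdaptiveAvgPool2d((2, 2))(torch.randn(1, 2048, 7, 7))
assert torch.flatten(_pooled, start_dim=2).transpose(1, 2).shape == (1, 4, 2048)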
class lowerCamelCase__ ( A ):
"""simple docstring"""
def __init__( self : Tuple , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : str ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = [json.loads(UpperCamelCase ) for l in open(UpperCamelCase )]
__UpperCAmelCase : Any = os.path.dirname(UpperCamelCase )
__UpperCAmelCase : List[str] = tokenizer
__UpperCAmelCase : str = labels
__UpperCAmelCase : Optional[int] = len(UpperCamelCase )
__UpperCAmelCase : int = max_seq_length
__UpperCAmelCase : int = transforms
def __len__( self : List[str] ):
'''simple docstring'''
return len(self.data )
def __getitem__( self : List[str] , UpperCamelCase : Any ):
'''simple docstring'''
__UpperCAmelCase : Tuple = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=UpperCamelCase ) )
        __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Dict = sentence[0], sentence[1:-1], sentence[-1]
__UpperCAmelCase : Any = sentence[: self.max_seq_length]
__UpperCAmelCase : Tuple = torch.zeros(self.n_classes )
__UpperCAmelCase : str = 1
__UpperCAmelCase : Any = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" )
__UpperCAmelCase : Optional[int] = self.transforms(UpperCamelCase )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Any = Counter()
for row in self.data:
label_freqs.update(row["""label"""] )
return label_freqs
def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
__UpperCAmelCase : Any = [len(row["""sentence"""] ) for row in batch]
    __UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ), max(_UpperCamelCase )
__UpperCAmelCase : Any = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long )
__UpperCAmelCase : str = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(_UpperCamelCase , _UpperCamelCase ) ):
__UpperCAmelCase : List[str] = input_row["""sentence"""]
__UpperCAmelCase : Tuple = 1
__UpperCAmelCase : int = torch.stack([row["""image"""] for row in batch] )
__UpperCAmelCase : Optional[Any] = torch.stack([row["""label"""] for row in batch] )
__UpperCAmelCase : str = torch.stack([row["""image_start_token"""] for row in batch] )
__UpperCAmelCase : int = torch.stack([row["""image_end_token"""] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
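# Note: text_tensor above is zero-padded to the longest sentence in the batch,
# and mask_tensor marks real tokens with 1 so downstream attention can ignore
# the padding positions.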
def lowerCamelCase ( ) -> int:
'''simple docstring'''
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def lowerCamelCase ( ) -> Optional[Any]:
'''simple docstring'''
return transforms.Compose(
[
transforms.Resize(2_5_6 ),
transforms.CenterCrop(2_2_4 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
] )
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase__ ( A , unittest.TestCase ):
"""simple docstring"""
__a = LEDTokenizer
__a = LEDTokenizerFast
__a = True
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
super().setUp()
__UpperCAmelCase : Tuple = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
__UpperCAmelCase : str = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
__UpperCAmelCase : Union[str, Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""}
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCamelCase ) )
def lowerCamelCase__ ( self : Tuple , **UpperCamelCase : int ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase )
def lowerCamelCase__ ( self : Optional[int] , **UpperCamelCase : List[str] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase )
def lowerCamelCase__ ( self : str , UpperCamelCase : Any ):
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__UpperCAmelCase : Union[str, Any] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : Any = tokenizer(UpperCamelCase , max_length=len(UpperCamelCase ) , padding=UpperCamelCase , return_tensors="""pt""" )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
__UpperCAmelCase : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCamelCase , UpperCamelCase )
@require_torch
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : Optional[int] = tokenizer(UpperCamelCase , padding=UpperCamelCase , return_tensors="""pt""" )
self.assertIn("""input_ids""" , UpperCamelCase )
self.assertIn("""attention_mask""" , UpperCamelCase )
self.assertNotIn("""labels""" , UpperCamelCase )
self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase )
@require_torch
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : Optional[Any] = tokenizer(text_target=UpperCamelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
@require_torch
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : str = tokenizer(
["""I am a small frog""" * 1_024, """I am a small frog"""] , padding=UpperCamelCase , truncation=UpperCamelCase , return_tensors="""pt""" )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
self.assertEqual(batch.input_ids.shape , (2, 5_122) )
@require_torch
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = ["""A long paragraph for summarization."""]
__UpperCAmelCase : int = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""pt""" )
__UpperCAmelCase : Tuple = tokenizer(text_target=UpperCamelCase , return_tensors="""pt""" )
__UpperCAmelCase : Optional[Any] = inputs["""input_ids"""]
__UpperCAmelCase : List[str] = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : Any = ["""Summary of the text.""", """Another summary."""]
__UpperCAmelCase : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
__UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , padding=UpperCamelCase )
__UpperCAmelCase : str = [[0] * len(UpperCamelCase ) for x in encoded_output["""input_ids"""]]
__UpperCAmelCase : List[Any] = tokenizer.pad(UpperCamelCase )
self.assertSequenceEqual(outputs["""global_attention_mask"""] , UpperCamelCase )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
__UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
__UpperCAmelCase : Any = """A, <mask> AllenNLP sentence."""
__UpperCAmelCase : Dict = tokenizer_r.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase )
__UpperCAmelCase : List[Any] = tokenizer_p.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
__UpperCAmelCase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
__UpperCAmelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
"""simple docstring"""
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase : Tuple = logging.get_logger(__name__)
def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Any , _UpperCamelCase : str ) -> str:
'''simple docstring'''
__UpperCAmelCase : int = WavaVecaForSequenceClassification.from_pretrained(_UpperCamelCase , config=_UpperCamelCase )
__UpperCAmelCase : int = downstream_dict["""projector.weight"""]
__UpperCAmelCase : Optional[int] = downstream_dict["""projector.bias"""]
__UpperCAmelCase : Any = downstream_dict["""model.post_net.linear.weight"""]
__UpperCAmelCase : List[Any] = downstream_dict["""model.post_net.linear.bias"""]
return model
def lowerCamelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : int , _UpperCamelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : str = WavaVecaForAudioFrameClassification.from_pretrained(_UpperCamelCase , config=_UpperCamelCase )
__UpperCAmelCase : Optional[Any] = downstream_dict["""model.linear.weight"""]
__UpperCAmelCase : str = downstream_dict["""model.linear.bias"""]
return model
def lowerCamelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any] ) -> int:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = WavaVecaForXVector.from_pretrained(_UpperCamelCase , config=_UpperCamelCase )
__UpperCAmelCase : Dict = downstream_dict["""connector.weight"""]
__UpperCAmelCase : Any = downstream_dict["""connector.bias"""]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
__UpperCAmelCase : str = downstream_dict[
f'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
__UpperCAmelCase : List[Any] = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
__UpperCAmelCase : Dict = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
__UpperCAmelCase : Any = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
__UpperCAmelCase : Any = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
__UpperCAmelCase : Union[str, Any] = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
__UpperCAmelCase : Any = downstream_dict["""objective.W"""]
return model
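# Note: each converter above copies the s3prl "Downstream" head weights into the
# matching attributes of the corresponding HF model; the underlying wav2vec2
# encoder weights come from the pretrained base checkpoint and are reused as-is.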
@torch.no_grad()
def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] ) -> Any:
'''simple docstring'''
__UpperCAmelCase : Any = torch.load(_UpperCamelCase , map_location="""cpu""" )
__UpperCAmelCase : str = checkpoint["""Downstream"""]
__UpperCAmelCase : str = WavaVecaConfig.from_pretrained(_UpperCamelCase )
__UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor.from_pretrained(
_UpperCamelCase , return_attention_mask=_UpperCamelCase , do_normalize=_UpperCamelCase )
__UpperCAmelCase : Optional[Any] = hf_config.architectures[0]
if arch.endswith("""ForSequenceClassification""" ):
__UpperCAmelCase : Tuple = convert_classification(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
elif arch.endswith("""ForAudioFrameClassification""" ):
__UpperCAmelCase : List[Any] = convert_diarization(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
elif arch.endswith("""ForXVector""" ):
__UpperCAmelCase : Optional[Any] = convert_xvector(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
__UpperCAmelCase : Optional[Any] = checkpoint["""Featurizer"""]["""weights"""]
hf_feature_extractor.save_pretrained(_UpperCamelCase )
hf_model.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
UpperCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
UpperCAmelCase : Dict = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class lowerCamelCase__ :
"""simple docstring"""
def __init__( self : List[str] , UpperCamelCase : int , UpperCamelCase : List[Any]=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Optional[int]=True , UpperCamelCase : Optional[int]=True , UpperCamelCase : Dict=True , UpperCamelCase : List[Any]=True , UpperCamelCase : int=99 , UpperCamelCase : Any=[1, 1, 2] , UpperCamelCase : Optional[Any]=1 , UpperCamelCase : Optional[Any]=32 , UpperCamelCase : Optional[int]=4 , UpperCamelCase : Union[str, Any]=8 , UpperCamelCase : int=37 , UpperCamelCase : Optional[Any]="gelu_new" , UpperCamelCase : Any=0.1 , UpperCamelCase : int=0.1 , UpperCamelCase : int=0.0 , UpperCamelCase : Union[str, Any]=512 , UpperCamelCase : Any=3 , UpperCamelCase : Optional[int]=0.02 , UpperCamelCase : Union[str, Any]=3 , UpperCamelCase : Union[str, Any]=4 , UpperCamelCase : str=None , UpperCamelCase : Tuple=False , ):
'''simple docstring'''
__UpperCAmelCase : int = parent
__UpperCAmelCase : int = batch_size
__UpperCAmelCase : str = seq_length
__UpperCAmelCase : Optional[Any] = is_training
__UpperCAmelCase : Optional[Any] = use_input_mask
__UpperCAmelCase : Tuple = use_token_type_ids
__UpperCAmelCase : List[str] = use_labels
__UpperCAmelCase : Tuple = vocab_size
__UpperCAmelCase : Optional[int] = block_sizes
__UpperCAmelCase : Optional[Any] = num_decoder_layers
__UpperCAmelCase : Union[str, Any] = d_model
__UpperCAmelCase : Dict = n_head
__UpperCAmelCase : Optional[Any] = d_head
__UpperCAmelCase : Dict = d_inner
__UpperCAmelCase : Any = hidden_act
__UpperCAmelCase : Optional[Any] = hidden_dropout
__UpperCAmelCase : List[Any] = attention_dropout
__UpperCAmelCase : str = activation_dropout
__UpperCAmelCase : Union[str, Any] = max_position_embeddings
__UpperCAmelCase : List[Any] = type_vocab_size
__UpperCAmelCase : str = 2
__UpperCAmelCase : Optional[Any] = num_labels
__UpperCAmelCase : List[Any] = num_choices
__UpperCAmelCase : Any = scope
__UpperCAmelCase : Dict = initializer_std
# Used in the tests to check the size of the first attention layer
__UpperCAmelCase : Dict = n_head
# Used in the tests to check the size of the first hidden state
__UpperCAmelCase : Dict = self.d_model
# Used in the tests to check the number of output hidden states/attentions
__UpperCAmelCase : Dict = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
__UpperCAmelCase : List[Any] = self.num_hidden_layers + 2
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : List[str] = None
if self.use_input_mask:
__UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : int = None
if self.use_token_type_ids:
__UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase : List[Any] = None
__UpperCAmelCase : Dict = None
__UpperCAmelCase : Optional[Any] = None
if self.use_labels:
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase : str = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase__ ( self : Any , UpperCamelCase : Any , UpperCamelCase : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = TFFunnelModel(config=UpperCamelCase )
__UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__UpperCAmelCase : List[str] = model(UpperCamelCase )
__UpperCAmelCase : List[Any] = [input_ids, input_mask]
__UpperCAmelCase : Dict = model(UpperCamelCase )
__UpperCAmelCase : Tuple = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
__UpperCAmelCase : int = False
__UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
__UpperCAmelCase : Any = False
__UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase )
__UpperCAmelCase : List[str] = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Any , ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = TFFunnelBaseModel(config=UpperCamelCase )
__UpperCAmelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__UpperCAmelCase : Optional[Any] = model(UpperCamelCase )
__UpperCAmelCase : int = [input_ids, input_mask]
__UpperCAmelCase : int = model(UpperCamelCase )
__UpperCAmelCase : List[Any] = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
__UpperCAmelCase : int = False
__UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase )
__UpperCAmelCase : str = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] , ):
'''simple docstring'''
__UpperCAmelCase : Tuple = TFFunnelForPreTraining(config=UpperCamelCase )
__UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__UpperCAmelCase : int = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : Tuple , UpperCamelCase : int , ):
'''simple docstring'''
__UpperCAmelCase : int = TFFunnelForMaskedLM(config=UpperCamelCase )
__UpperCAmelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__UpperCAmelCase : Optional[Any] = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , ):
'''simple docstring'''
__UpperCAmelCase : Dict = self.num_labels
__UpperCAmelCase : Optional[Any] = TFFunnelForSequenceClassification(config=UpperCamelCase )
__UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__UpperCAmelCase : Tuple = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int , ):
'''simple docstring'''
__UpperCAmelCase : Dict = self.num_choices
__UpperCAmelCase : str = TFFunnelForMultipleChoice(config=UpperCamelCase )
__UpperCAmelCase : Optional[Any] = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase : str = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase : int = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase : List[str] = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
__UpperCAmelCase : int = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase__ ( self : List[str] , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : Any , ):
'''simple docstring'''
__UpperCAmelCase : int = self.num_labels
__UpperCAmelCase : str = TFFunnelForTokenClassification(config=UpperCamelCase )
__UpperCAmelCase : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__UpperCAmelCase : int = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self : str , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , ):
'''simple docstring'''
__UpperCAmelCase : Any = TFFunnelForQuestionAnswering(config=UpperCamelCase )
__UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__UpperCAmelCase : Any = model(UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
        __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Dict = config_and_inputs
__UpperCAmelCase : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class lowerCamelCase__ ( A , A , unittest.TestCase ):
"""simple docstring"""
__a = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
__a = (
{
"""feature-extraction""": (TFFunnelBaseModel, TFFunnelModel),
"""fill-mask""": TFFunnelForMaskedLM,
"""question-answering""": TFFunnelForQuestionAnswering,
"""text-classification""": TFFunnelForSequenceClassification,
"""token-classification""": TFFunnelForTokenClassification,
"""zero-shot""": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
__a = False
__a = False
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = TFFunnelModelTester(self )
__UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase )
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase )
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase )
@require_tf
class lowerCamelCase__ ( A , unittest.TestCase ):
"""simple docstring"""
__a = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
__a = False
__a = False
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : List[str] = TFFunnelModelTester(self , base=UpperCamelCase )
__UpperCAmelCase : List[Any] = ConfigTester(self , config_class=UpperCamelCase )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*UpperCamelCase )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase )
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase : Tuple = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[str] = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[int] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : str = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] ) -> Any:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] ) -> str:
'''simple docstring'''
__UpperCAmelCase : Dict = 0
while b > 0:
if b & 1:
__UpperCAmelCase : int = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
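# A minimal working restatement of the shift-and-add scheme above, with plain
# names for illustration (this is Russian peasant multiplication):
def binary_multiply_sketch(a: int, b: int) -> int:
    res = 0
    while b > 0:
        if b & 1:  # add the current shifted copy of a when this bit of b is set
            res += a
        a += a  # double a (shift left by one)
        b >>= 1  # move to the next bit of b
    return res

assert binary_multiply_sketch(13, 11) == 143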
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class lowerCamelCase__ :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCamelCase : List[str]=None , UpperCamelCase : int=None ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = list(poly_a or [0] )[:]
__UpperCAmelCase : Optional[Any] = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
__UpperCAmelCase : int = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
__UpperCAmelCase : Dict = len(self.polyB )
# Add 0 to make lengths equal a power of 2
        __UpperCAmelCase : List[Any] = int(
            2 ** np.ceil(np.log2(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
__UpperCAmelCase : Union[str, Any] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
__UpperCAmelCase : Dict = self.__multiply()
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Dict = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB]
# Corner case
if len(UpperCamelCase ) <= 1:
return dft[0]
#
__UpperCAmelCase : Dict = self.c_max_length // 2
while next_ncol > 0:
__UpperCAmelCase : List[str] = [[] for i in range(UpperCamelCase )]
__UpperCAmelCase : Optional[Any] = self.root**next_ncol
# First half of next step
__UpperCAmelCase : Dict = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCamelCase ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
__UpperCAmelCase : Union[str, Any] = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCamelCase ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
__UpperCAmelCase : int = new_dft
__UpperCAmelCase : Tuple = next_ncol // 2
return dft[0]
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = self.__dft("""A""" )
__UpperCAmelCase : Any = self.__dft("""B""" )
__UpperCAmelCase : Optional[Any] = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
__UpperCAmelCase : Tuple = 2
while next_ncol <= self.c_max_length:
__UpperCAmelCase : Dict = [[] for i in range(UpperCamelCase )]
__UpperCAmelCase : str = self.root ** (next_ncol // 2)
__UpperCAmelCase : int = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
__UpperCAmelCase : List[str] = new_inverse_c
next_ncol *= 2
# Unpack
__UpperCAmelCase : int = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Tuple ):
'''simple docstring'''
        __UpperCAmelCase : Any = """A = """ + """ + """.join(
            f'''{coef}*x^{i}''' for i, coef in enumerate(self.polyA[: self.len_A] ) )
        __UpperCAmelCase : List[Any] = """B = """ + """ + """.join(
            f'''{coef}*x^{i}''' for i, coef in enumerate(self.polyB[: self.len_B] ) )
        __UpperCAmelCase : str = """A*B = """ + """ + """.join(
            f'''{coef}*x^{i}''' for i, coef in enumerate(self.product ) )
return f'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
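# A standalone sanity sketch using numpy only: FFT-based polynomial
# multiplication should match direct convolution, e.g. (1 + 2x)(1 + 3x)
# = 1 + 5x + 6x^2.
import numpy as np

_fft_length = 4  # next power of two >= len(a) + len(b) - 1
_product = np.fft.ifft(np.fft.fft([1, 2], _fft_length) * np.fft.fft([1, 3], _fft_length))
assert np.allclose(_product.real[:3], [1, 5, 6])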
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase__ ( A ):
"""simple docstring"""
__a = ["""image_processor""", """tokenizer"""]
__a = """AutoImageProcessor"""
__a = """AutoTokenizer"""
def __init__( self : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : List[str] ):
'''simple docstring'''
super().__init__(UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : str = self.image_processor
def __call__( self : Dict , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[int]=None , UpperCamelCase : int=None , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
__UpperCAmelCase : List[str] = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if images is not None:
__UpperCAmelCase : Optional[Any] = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if text is not None and images is not None:
__UpperCAmelCase : str = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase )
def lowerCamelCase__ ( self : List[str] , *UpperCamelCase : Optional[int] , **UpperCamelCase : Dict ):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )
def lowerCamelCase__ ( self : int , *UpperCamelCase : str , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )
@property
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"]
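# Usage sketch (names and inputs are hypothetical): when both modalities are
# given, the processor above merges tokenizer output and image features into a
# single encoding carrying input_ids, attention_mask and pixel_values, e.g.
#   batch = processor(text=["a photo"], images=[pil_image], return_tensors="pt")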
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class lowerCamelCase__ ( A ):
"""simple docstring"""
__a = """funnel"""
__a = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
}
def __init__( self : Tuple , UpperCamelCase : Any=30_522 , UpperCamelCase : List[Any]=[4, 4, 4] , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Tuple=2 , UpperCamelCase : Optional[int]=768 , UpperCamelCase : str=12 , UpperCamelCase : Dict=64 , UpperCamelCase : Optional[Any]=3_072 , UpperCamelCase : str="gelu_new" , UpperCamelCase : Dict=0.1 , UpperCamelCase : Dict=0.1 , UpperCamelCase : List[Any]=0.0 , UpperCamelCase : Optional[int]=0.1 , UpperCamelCase : List[Any]=None , UpperCamelCase : Any=1e-9 , UpperCamelCase : List[str]="mean" , UpperCamelCase : Any="relative_shift" , UpperCamelCase : Dict=True , UpperCamelCase : Dict=True , UpperCamelCase : str=True , **UpperCamelCase : str , ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : Any = block_sizes
__UpperCAmelCase : Optional[Any] = [1] * len(UpperCamelCase ) if block_repeats is None else block_repeats
assert len(UpperCamelCase ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
__UpperCAmelCase : List[str] = num_decoder_layers
__UpperCAmelCase : Dict = d_model
__UpperCAmelCase : List[str] = n_head
__UpperCAmelCase : Tuple = d_head
__UpperCAmelCase : List[str] = d_inner
__UpperCAmelCase : Optional[int] = hidden_act
__UpperCAmelCase : Dict = hidden_dropout
__UpperCAmelCase : Tuple = attention_dropout
__UpperCAmelCase : int = activation_dropout
__UpperCAmelCase : Dict = initializer_range
__UpperCAmelCase : Optional[int] = initializer_std
__UpperCAmelCase : int = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], f'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'''
__UpperCAmelCase : int = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], f'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'''
__UpperCAmelCase : Optional[Any] = attention_type
__UpperCAmelCase : str = separate_cls
__UpperCAmelCase : Any = truncate_seq
__UpperCAmelCase : str = pool_q_only
super().__init__(**UpperCamelCase )
@property
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
return sum(self.block_sizes )
@num_hidden_layers.setter
def lowerCamelCase__ ( self : str , UpperCamelCase : Any ):
'''simple docstring'''
raise NotImplementedError(
"""This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.""" )
@property
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
return len(self.block_sizes )
@num_blocks.setter
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : Any ):
'''simple docstring'''
raise NotImplementedError("""This model does not support the setting of `num_blocks`. Please set `block_sizes`.""" )
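# Note: with the default block_sizes=[4, 4, 4], the num_hidden_layers property
# above reports 12 encoder layers and num_blocks reports 3; decoder depth is
# configured separately through num_decoder_layers.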
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase ( _UpperCamelCase : list[float] , _UpperCamelCase : list[float] ) -> float:
'''simple docstring'''
__UpperCAmelCase : Tuple = sorted(numsa + numsa )
__UpperCAmelCase ,__UpperCAmelCase : Dict = divmod(len(_UpperCamelCase ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
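# Worked example: [1, 3] and [2] merge to [1, 2, 3], an odd count, so the median
# is the middle element 2; [1, 2] and [3, 4] merge to an even count, so the
# median averages the two middle values: (2 + 3) / 2 = 2.5.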
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase : List[Any] = [float(x) for x in input('Enter the elements of first array: ').split()]
UpperCAmelCase : Optional[int] = [float(x) for x in input('Enter the elements of second array: ').split()]
print(F"The median of two arrays is: {median_of_two_arrays(array_a, array_a)}")
"""simple docstring"""
import argparse
import os
import re
import packaging.version
UpperCAmelCase : Optional[Any] = 'examples/'
UpperCAmelCase : List[Any] = {
'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'),
'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
UpperCAmelCase : List[Any] = {
'init': 'src/transformers/__init__.py',
'setup': 'setup.py',
}
UpperCAmelCase : Any = 'README.md'
def lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[int] ) -> List[Any]:
'''simple docstring'''
with open(_UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__UpperCAmelCase : Optional[Any] = f.read()
__UpperCAmelCase : str = REPLACE_PATTERNS[pattern]
__UpperCAmelCase : Tuple = replace.replace("""VERSION""" , _UpperCamelCase )
__UpperCAmelCase : Optional[Any] = re_pattern.sub(_UpperCamelCase , _UpperCamelCase )
with open(_UpperCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(_UpperCamelCase )
def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
for folder, directories, fnames in os.walk(_UpperCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase , pattern="""examples""" )
def lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : List[Any]=False ) -> List[str]:
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if not patch:
update_version_in_examples(_UpperCamelCase )
def lowerCamelCase ( ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = """🤗 Transformers currently provides the following architectures"""
__UpperCAmelCase : Union[str, Any] = """1. Want to contribute a new model?"""
with open(_UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__UpperCAmelCase : List[Any] = f.readlines()
# Find the start of the list.
__UpperCAmelCase : Dict = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__UpperCAmelCase : Optional[Any] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
__UpperCAmelCase : str = lines[index].replace(
"""https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , )
index += 1
with open(_UpperCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(_UpperCamelCase )
def lowerCamelCase ( ) -> Union[str, Any]:
'''simple docstring'''
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
__UpperCAmelCase : str = f.read()
__UpperCAmelCase : Tuple = REPLACE_PATTERNS["""init"""][0].search(_UpperCamelCase ).groups()[0]
return packaging.version.parse(_UpperCamelCase )
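# Quick illustration of the packaging.version fields used by the release helpers
# below (values are illustrative):
#   v = packaging.version.parse("4.31.0.dev0")
#   v.is_devrelease              -> True
#   v.base_version               -> "4.31.0"
#   (v.major, v.minor, v.micro)  -> (4, 31, 0)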
def lowerCamelCase ( _UpperCamelCase : int=False ) -> Any:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
__UpperCAmelCase : Dict = default_version.base_version
elif patch:
__UpperCAmelCase : Any = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
__UpperCAmelCase : Dict = f'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
__UpperCAmelCase : Optional[Any] = input(f'''Which version are you releasing? [{default_version}]''' )
if len(_UpperCamelCase ) == 0:
__UpperCAmelCase : int = default_version
print(f'''Updating version to {version}.''' )
global_version_update(_UpperCamelCase , patch=_UpperCamelCase )
if not patch:
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
def lowerCamelCase ( ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : str = get_version()
__UpperCAmelCase : Dict = f'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
__UpperCAmelCase : str = current_version.base_version
# Check with the user we got that right.
__UpperCAmelCase : Dict = input(f'''Which version are we developing now? [{dev_version}]''' )
if len(_UpperCamelCase ) == 0:
__UpperCAmelCase : List[Any] = dev_version
print(f'''Updating version to {version}.''' )
global_version_update(_UpperCamelCase )
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
if __name__ == "__main__":
UpperCAmelCase : List[str] = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
UpperCAmelCase : int = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
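# Typical invocations of this script (the path is illustrative):
#   python utils/release.py                 # pre-release: bump to the release version
#   python utils/release.py --patch         # pre-release: bump the micro/patch version
#   python utils/release.py --post_release  # post-release: bump to the next .dev0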
| 352
|
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = """hf-internal-testing/tiny-random-t5"""
__UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(UpperCamelCase )
__UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase )
__UpperCAmelCase : Optional[int] = tokenizer("""This is me""" , return_tensors="""pt""" )
__UpperCAmelCase : int = model.to_bettertransformer()
self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
__UpperCAmelCase : Tuple = model.generate(**UpperCamelCase )
__UpperCAmelCase : Tuple = model.reverse_bettertransformer()
self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase )
__UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase )
self.assertFalse(
any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
__UpperCAmelCase : Tuple = model_reloaded.generate(**UpperCamelCase )
self.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase ) )
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Any = """hf-internal-testing/tiny-random-t5"""
__UpperCAmelCase : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase )
__UpperCAmelCase : Tuple = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(UpperCamelCase ):
model.save_pretrained(UpperCamelCase )
__UpperCAmelCase : Tuple = model.reverse_bettertransformer()
model.save_pretrained(UpperCamelCase )
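# Sketch of the round trip these tests exercise (requires `optimum` to be
# installed; model id is illustrative):
#   model = AutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
#   model = model.to_bettertransformer()       # swap in fused BetterTransformer modules
#   model = model.reverse_bettertransformer()  # restore vanilla modules before saving
#   model.save_pretrained("saved_model")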
| 320
| 0
|
"""simple docstring"""
from math import factorial
UpperCAmelCase : Tuple = {str(d): factorial(d) for d in range(10)}
def lowerCamelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
return sum(DIGIT_FACTORIAL[d] for d in str(_UpperCamelCase ) )
def lowerCamelCase ( ) -> int:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = 7 * factorial(9 ) + 1
return sum(i for i in range(3 , _UpperCamelCase ) if sum_of_digit_factorial(_UpperCamelCase ) == i )
if __name__ == "__main__":
print(F"{solution() = }")
| 353
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
UpperCAmelCase : Dict = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : str = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
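# Lazy-import behaviour in a nutshell: nothing under this package is imported
# until an attribute is actually accessed, e.g. (illustrative):
#   from transformers.models.bartpho import BartphoTokenizer  # resolved on first access,
#   # and only if sentencepiece is available; otherwise the name is absent.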
| 320
| 0
|
"""simple docstring"""
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
UpperCAmelCase : Optional[Any] = HfApi()
UpperCAmelCase : Tuple = {}
# fmt: off
UpperCAmelCase : Tuple = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
UpperCAmelCase : List[Any] = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
UpperCAmelCase : List[str] = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
UpperCAmelCase : Union[str, Any] = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
UpperCAmelCase : List[str] = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
UpperCAmelCase : List[str] = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
UpperCAmelCase : Tuple = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
UpperCAmelCase : List[str] = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
UpperCAmelCase : Dict = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
UpperCAmelCase : int = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
UpperCAmelCase : Tuple = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
UpperCAmelCase : List[Any] = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
UpperCAmelCase : Union[str, Any] = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
UpperCAmelCase : Optional[int] = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
UpperCAmelCase : Optional[Any] = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
UpperCAmelCase : Any = api.list_models(filter='diffusers')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
UpperCAmelCase : Any = '/home/patrick/google_checkpoints/' + mod.modelId.split('/')[-1]
print(F"Started running {mod.modelId}!!!")
if mod.modelId.startswith('CompVis'):
UpperCAmelCase : str = UNetaDModel.from_pretrained(local_checkpoint, subfolder='unet')
else:
UpperCAmelCase : str = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
UpperCAmelCase : List[str] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
UpperCAmelCase : List[Any] = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
UpperCAmelCase : List[Any] = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['_'.join('_'.join(mod.modelId.split('/')).split('-'))], atol=1E-3
)
print(F"{mod.modelId} has passed successfully!!!")
| 354
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase : List[str] = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Tuple = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Dict = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 320
| 0
|
"""simple docstring"""
def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : Dict ) -> List[str]:
'''simple docstring'''
if index == r:
for j in range(_UpperCamelCase ):
print(data[j] , end=""" """ )
print(""" """ )
return
    # When there are no more elements left to put in data[]
if i >= n:
return
# current is included, put next at next location
__UpperCAmelCase : Any = arr[i]
combination_util(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , index + 1 , _UpperCamelCase , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : List[str] ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : str = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , 0 , _UpperCamelCase , 0 )
if __name__ == "__main__":
# Driver code to check the function above
UpperCAmelCase : Dict = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
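# Expected output of the driver above: the C(5, 3) = 10 combinations in
# lexicographic order, one per line:
#   10 20 30, 10 20 40, 10 20 50, 10 30 40, 10 30 50,
#   10 40 50, 20 30 40, 20 30 50, 20 40 50, 30 40 50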
| 355
|
"""simple docstring"""
def lowerCamelCase ( ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : List[str] = []
__UpperCAmelCase : List[str] = 1
while len(_UpperCamelCase ) < 1E6:
constant.append(str(_UpperCamelCase ) )
i += 1
__UpperCAmelCase : List[str] = """""".join(_UpperCamelCase )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[9_9] )
* int(constant[9_9_9] )
* int(constant[9_9_9_9] )
* int(constant[9_9_9_9_9] )
* int(constant[9_9_9_9_9_9] )
)
if __name__ == "__main__":
print(solution())
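# Indexing sketch: constant = "123456789101112..." so constant[0] is d_1 and
# constant[9] is d_10 (the first digit of "10"). The returned product
# d_1 * d_10 * d_100 * ... * d_1000000 is the answer to Project Euler
# problem 40 (published answer: 210).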
| 320
| 0
|
"""simple docstring"""
def lowerCamelCase ( ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : List[str] = []
__UpperCAmelCase : List[str] = 1
while len(_UpperCamelCase ) < 1E6:
constant.append(str(_UpperCamelCase ) )
i += 1
__UpperCAmelCase : List[str] = """""".join(_UpperCamelCase )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[9_9] )
* int(constant[9_9_9] )
* int(constant[9_9_9_9] )
* int(constant[9_9_9_9_9] )
* int(constant[9_9_9_9_9_9] )
)
if __name__ == "__main__":
print(solution())
| 356
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase : Tuple = {
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[Any] = ['ElectraTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Any = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[Any] = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : str = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 320
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , UpperCamelCase : Any , UpperCamelCase : Tuple=13 , UpperCamelCase : str=7 , UpperCamelCase : Tuple=True , UpperCamelCase : Tuple=True , UpperCamelCase : Tuple=True , UpperCamelCase : int=True , UpperCamelCase : Dict=99 , UpperCamelCase : Tuple=32 , UpperCamelCase : Optional[Any]=5 , UpperCamelCase : Tuple=4 , UpperCamelCase : int=37 , UpperCamelCase : Optional[Any]="gelu" , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Optional[Any]=512 , UpperCamelCase : int=16 , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : Optional[Any]=0.02 , UpperCamelCase : Tuple=4 , ):
'''simple docstring'''
__UpperCAmelCase : Dict = parent
__UpperCAmelCase : Any = batch_size
__UpperCAmelCase : Optional[Any] = seq_length
__UpperCAmelCase : Optional[Any] = is_training
__UpperCAmelCase : int = use_attention_mask
__UpperCAmelCase : List[str] = use_token_type_ids
__UpperCAmelCase : Union[str, Any] = use_labels
__UpperCAmelCase : List[Any] = vocab_size
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : Optional[int] = num_attention_heads
__UpperCAmelCase : Dict = intermediate_size
__UpperCAmelCase : Dict = hidden_act
__UpperCAmelCase : Optional[int] = hidden_dropout_prob
__UpperCAmelCase : str = attention_probs_dropout_prob
__UpperCAmelCase : Any = max_position_embeddings
__UpperCAmelCase : int = type_vocab_size
__UpperCAmelCase : int = type_sequence_label_size
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : List[str] = num_choices
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : str = None
if self.use_attention_mask:
__UpperCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : Optional[int] = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=UpperCamelCase , )
return config, input_ids, attention_mask
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Any = self.prepare_config_and_inputs()
__UpperCAmelCase : Any = config_and_inputs
__UpperCAmelCase : Any = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class lowerCamelCase__ ( A , unittest.TestCase ):
"""simple docstring"""
__a = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : Tuple = FlaxDistilBertModelTester(self )
@slow
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCAmelCase : List[str] = model_class_name.from_pretrained("""distilbert-base-uncased""" )
__UpperCAmelCase : Optional[int] = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase )
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
__UpperCAmelCase : Any = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
__UpperCAmelCase : List[str] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__UpperCAmelCase : Tuple = model(UpperCamelCase , attention_mask=UpperCamelCase )[0]
__UpperCAmelCase : int = (1, 11, 768)
self.assertEqual(output.shape , UpperCamelCase )
__UpperCAmelCase : Tuple = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , UpperCamelCase , atol=1e-4 ) )
| 357
|
"""simple docstring"""
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
UpperCAmelCase : Optional[Any] = 'scheduler_config.json'
class lowerCamelCase__ ( A ):
"""simple docstring"""
__a = 1
__a = 2
__a = 3
__a = 4
__a = 5
__a = 6
__a = 7
__a = 8
__a = 9
__a = 10
__a = 11
__a = 12
__a = 13
__a = 14
@dataclass
class lowerCamelCase__ ( A ):
"""simple docstring"""
__a = 42
class lowerCamelCase__ :
"""simple docstring"""
__a = SCHEDULER_CONFIG_NAME
__a = []
__a = True
@classmethod
def lowerCamelCase__ ( cls : Any , UpperCamelCase : Dict[str, Any] = None , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[Any]=False , **UpperCamelCase : int , ):
'''simple docstring'''
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : List[Any] = cls.load_config(
pretrained_model_name_or_path=UpperCamelCase , subfolder=UpperCamelCase , return_unused_kwargs=UpperCamelCase , return_commit_hash=UpperCamelCase , **UpperCamelCase , )
return cls.from_config(UpperCamelCase , return_unused_kwargs=UpperCamelCase , **UpperCamelCase )
def lowerCamelCase__ ( self : int , UpperCamelCase : Union[str, os.PathLike] , UpperCamelCase : bool = False , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
self.save_config(save_directory=UpperCamelCase , push_to_hub=UpperCamelCase , **UpperCamelCase )
@property
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
return self._get_compatibles()
@classmethod
def lowerCamelCase__ ( cls : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = list(set([cls.__name__] + cls._compatibles ) )
__UpperCAmelCase : List[str] = importlib.import_module(__name__.split(""".""" )[0] )
__UpperCAmelCase : List[str] = [
getattr(UpperCamelCase , UpperCamelCase ) for c in compatible_classes_str if hasattr(UpperCamelCase , UpperCamelCase )
]
return compatible_classes
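# Usage sketch (repo id is illustrative; in the upstream diffusers API the
# compatibles property above is exposed as `scheduler.compatibles`):
#   scheduler = DDIMScheduler.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="scheduler")
#   scheduler.save_pretrained("./my_scheduler")
#   print(scheduler.compatibles)  # scheduler classes that can share this config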
| 320
| 0
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class lowerCamelCase__ :
"""simple docstring"""
def lowerCamelCase__ ( self : str , UpperCamelCase : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : str ):
'''simple docstring'''
return None
class lowerCamelCase__ :
"""simple docstring"""
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Dict , UpperCamelCase : List[Any] ):
'''simple docstring'''
return None
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
__a = [
# (model_name, model_kwargs)
("""bert-base-cased""", {}),
("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(UpperCamelCase , """tf""" , 12 , **UpperCamelCase )
@require_torch
@slow
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(UpperCamelCase , """pt""" , 12 , **UpperCamelCase )
@require_torch
@slow
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
from transformers import BertModel
__UpperCAmelCase : str = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
vocab_file.write("""\n""".join(UpperCamelCase ) )
vocab_file.flush()
__UpperCAmelCase : Union[str, Any] = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
__UpperCAmelCase : Optional[int] = BertModel(BertConfig(vocab_size=len(UpperCamelCase ) ) )
model.save_pretrained(UpperCamelCase )
self._test_export(UpperCamelCase , """pt""" , 12 , UpperCamelCase )
@require_tf
@slow
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
__UpperCAmelCase : Dict = self._test_export(UpperCamelCase , """tf""" , 12 , **UpperCamelCase )
__UpperCAmelCase : int = quantize(Path(UpperCamelCase ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(UpperCamelCase ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
@require_torch
@slow
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
__UpperCAmelCase : List[Any] = self._test_export(UpperCamelCase , """pt""" , 12 , **UpperCamelCase )
__UpperCAmelCase : Dict = quantize(UpperCamelCase )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(UpperCamelCase ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : str , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Any=None , **UpperCamelCase : List[str] ):
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
__UpperCAmelCase : int = Path(UpperCamelCase ).joinpath("""model.onnx""" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase )
return path
except Exception as e:
self.fail(UpperCamelCase )
@require_torch
@require_tokenizers
@slow
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
from transformers import BertModel
__UpperCAmelCase : List[str] = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
__UpperCAmelCase : Tuple = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(UpperCamelCase , UpperCamelCase , """pt""" )
@require_tf
@require_tokenizers
@slow
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
from transformers import TFBertModel
__UpperCAmelCase : Union[str, Any] = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
__UpperCAmelCase : Dict = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(UpperCamelCase , UpperCamelCase , """tf""" )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : str ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = FeatureExtractionPipeline(UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : List[Any] = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
__UpperCAmelCase : str = infer_shapes(UpperCamelCase , UpperCamelCase )
# Assert all variables are present
self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , UpperCamelCase )
self.assertSequenceEqual(variable_names[3:] , UpperCamelCase )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} )
self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} )
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Any = ["""input_ids""", """attention_mask""", """token_type_ids"""]
__UpperCAmelCase : Optional[int] = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
__UpperCAmelCase : Tuple = ensure_valid_input(FuncContiguousArgs() , UpperCamelCase , UpperCamelCase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(UpperCamelCase ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(UpperCamelCase ) , set(UpperCamelCase ) )
        # Parameters should be reordered according to their respective places in the
        # function signature: (input_ids, token_type_ids, attention_mask)
self.assertEqual(UpperCamelCase , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
        # Generated args are interleaved with other args (for instance the parameter "past" in GPT2)
__UpperCAmelCase : Optional[int] = ensure_valid_input(FuncNonContiguousArgs() , UpperCamelCase , UpperCamelCase )
        # Should have exactly one arg (everything before the argument that was not provided, "some_other_args")
self.assertEqual(len(UpperCamelCase ) , 1 )
self.assertEqual(len(UpperCamelCase ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["""input_ids"""] )
self.assertEqual(ordered_input_names[0] , """input_ids""" )
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" )
self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
| 358
|
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class lowerCamelCase__ :
"""simple docstring"""
@staticmethod
def lowerCamelCase__ ( *UpperCamelCase : Optional[Any] , **UpperCamelCase : Dict ):
'''simple docstring'''
pass
def lowerCamelCase ( _UpperCamelCase : Image ) -> str:
'''simple docstring'''
__UpperCAmelCase : Tuple = hashlib.mda(image.tobytes() )
return m.hexdigest()[:1_0]
def lowerCamelCase ( _UpperCamelCase : Image ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Tuple = np.array(_UpperCamelCase )
__UpperCAmelCase : List[Any] = npimg.shape
return {"hash": hashimage(_UpperCamelCase ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
__a = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
__a = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = MaskGenerationPipeline(model=UpperCamelCase , image_processor=UpperCamelCase )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : List[Any] ):
'''simple docstring'''
pass
@require_tf
@unittest.skip("""Image segmentation not implemented in TF""" )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
pass
@slow
@require_torch
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Tuple = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" )
__UpperCAmelCase : Any = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=256 )
# Shortening by hashing
__UpperCAmelCase : int = []
for i, o in enumerate(outputs["""masks"""] ):
new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.021},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9967},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.993},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9909},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9879},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, """scores""": 0.9834},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9716},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9612},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9599},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9552},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9532},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9516},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9499},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9483},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9464},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.943},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.943},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9408},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9335},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9326},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9262},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8999},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8986},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8984},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8873},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8871}
] , )
# fmt: on
@require_torch
@slow
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Any = """facebook/sam-vit-huge"""
__UpperCAmelCase : str = pipeline("""mask-generation""" , model=UpperCamelCase )
__UpperCAmelCase : int = image_segmenter(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
__UpperCAmelCase : Dict = []
for i, o in enumerate(outputs["""masks"""] ):
new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0210},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053},
] , )
| 320
| 0
|
"""simple docstring"""
from PIL import Image
def lowerCamelCase ( _UpperCamelCase : Image , _UpperCamelCase : int ) -> Image:
'''simple docstring'''
__UpperCAmelCase : List[str] = (2_5_9 * (level + 2_5_5)) / (2_5_5 * (2_5_9 - level))
def contrast(_UpperCamelCase : int ) -> int:
return int(1_2_8 + factor * (c - 1_2_8) )
return img.point(_UpperCamelCase )
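# Worked example: at level = 170 the factor is
#   259 * (170 + 255) / (255 * (259 - 170)) = 110075 / 22695 ≈ 4.85,
# so mid-grey (c = 128) maps to itself while other values are pushed about
# 4.85x further from 128 (PIL clips the result to the valid 0..255 range).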
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
UpperCAmelCase : Any = change_contrast(img, 170)
cont_img.save('image_data/lena_high_contrast.png', format='png')
| 359
|
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
UpperCAmelCase : str = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class lowerCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , UpperCamelCase : str ):
'''simple docstring'''
super().__init__()
__UpperCAmelCase : Union[str, Any] = torchvision.models.resnetaaa(pretrained=UpperCamelCase )
__UpperCAmelCase : int = list(model.children() )[:-2]
__UpperCAmelCase : List[Any] = nn.Sequential(*UpperCamelCase )
__UpperCAmelCase : str = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def lowerCamelCase__ ( self : Dict , UpperCamelCase : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.pool(self.model(UpperCamelCase ) )
__UpperCAmelCase : List[Any] = torch.flatten(UpperCamelCase , start_dim=2 )
__UpperCAmelCase : Any = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class lowerCamelCase__ ( A ):
"""simple docstring"""
def __init__( self : Tuple , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : str ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = [json.loads(UpperCamelCase ) for l in open(UpperCamelCase )]
__UpperCAmelCase : Any = os.path.dirname(UpperCamelCase )
__UpperCAmelCase : List[str] = tokenizer
__UpperCAmelCase : str = labels
__UpperCAmelCase : Optional[int] = len(UpperCamelCase )
__UpperCAmelCase : int = max_seq_length
__UpperCAmelCase : int = transforms
def __len__( self : List[str] ):
'''simple docstring'''
return len(self.data )
def __getitem__( self : List[str] , UpperCamelCase : Any ):
'''simple docstring'''
__UpperCAmelCase : Tuple = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=UpperCamelCase ) )
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Dict = sentence[0], sentence[1:-1], sentence[-1]
__UpperCAmelCase : Any = sentence[: self.max_seq_length]
__UpperCAmelCase : Tuple = torch.zeros(self.n_classes )
__UpperCAmelCase : str = 1
__UpperCAmelCase : Any = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" )
__UpperCAmelCase : Optional[int] = self.transforms(UpperCamelCase )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Any = Counter()
for row in self.data:
label_freqs.update(row["""label"""] )
return label_freqs
def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
__UpperCAmelCase : Any = [len(row["""sentence"""] ) for row in batch]
__UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ), max(_UpperCamelCase )
__UpperCAmelCase : Any = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long )
__UpperCAmelCase : str = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(_UpperCamelCase , _UpperCamelCase ) ):
__UpperCAmelCase : List[str] = input_row["""sentence"""]
__UpperCAmelCase : Tuple = 1
__UpperCAmelCase : int = torch.stack([row["""image"""] for row in batch] )
__UpperCAmelCase : Optional[Any] = torch.stack([row["""label"""] for row in batch] )
__UpperCAmelCase : str = torch.stack([row["""image_start_token"""] for row in batch] )
__UpperCAmelCase : int = torch.stack([row["""image_end_token"""] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
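# Collation sketch for a batch of two rows with sentence lengths 5 and 3
# (values illustrative): text_tensor is a LongTensor of shape (2, 5),
# zero-padded on the right, and mask_tensor holds 1 over real tokens and
# 0 over the padding.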
def lowerCamelCase ( ) -> int:
'''simple docstring'''
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def lowerCamelCase ( ) -> Optional[Any]:
'''simple docstring'''
return transforms.Compose(
[
transforms.Resize(2_5_6 ),
transforms.CenterCrop(2_2_4 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
] )
| 320
| 0
|
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = """ZinengTang/tvlt-base"""
__UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
def lowerCamelCase__ ( self : Union[str, Any] , **UpperCamelCase : int ):
'''simple docstring'''
return TvltImageProcessor.from_pretrained(self.checkpoint , **UpperCamelCase )
def lowerCamelCase__ ( self : int , **UpperCamelCase : Any ):
'''simple docstring'''
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **UpperCamelCase )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Tuple = self.get_image_processor()
__UpperCAmelCase : int = self.get_feature_extractor()
__UpperCAmelCase : int = TvltProcessor(image_processor=UpperCamelCase , feature_extractor=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
__UpperCAmelCase : Optional[Any] = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , UpperCamelCase )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : int = self.get_image_processor()
__UpperCAmelCase : int = self.get_feature_extractor()
__UpperCAmelCase : Tuple = TvltProcessor(image_processor=UpperCamelCase , feature_extractor=UpperCamelCase )
__UpperCAmelCase : Tuple = np.ones([12_000] )
__UpperCAmelCase : List[Any] = feature_extractor(UpperCamelCase , return_tensors="""np""" )
__UpperCAmelCase : Optional[Any] = processor(audio=UpperCamelCase , return_tensors="""np""" )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = self.get_image_processor()
__UpperCAmelCase : Optional[int] = self.get_feature_extractor()
__UpperCAmelCase : List[Any] = TvltProcessor(image_processor=UpperCamelCase , feature_extractor=UpperCamelCase )
__UpperCAmelCase : str = np.ones([3, 224, 224] )
__UpperCAmelCase : Any = image_processor(UpperCamelCase , return_tensors="""np""" )
__UpperCAmelCase : List[Any] = processor(images=UpperCamelCase , return_tensors="""np""" )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : List[str] = self.get_image_processor()
__UpperCAmelCase : Dict = self.get_feature_extractor()
__UpperCAmelCase : Tuple = TvltProcessor(image_processor=UpperCamelCase , feature_extractor=UpperCamelCase )
__UpperCAmelCase : Dict = np.ones([12_000] )
__UpperCAmelCase : Union[str, Any] = np.ones([3, 224, 224] )
__UpperCAmelCase : Dict = processor(audio=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""audio_values""", """audio_mask""", """pixel_values""", """pixel_mask"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase ):
processor()
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : str = self.get_image_processor()
__UpperCAmelCase : str = self.get_feature_extractor()
__UpperCAmelCase : Tuple = TvltProcessor(image_processor=UpperCamelCase , feature_extractor=UpperCamelCase )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="""`processor` and `image_processor`+`feature_extractor` model input names do not match""" , )
| 360
|
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 320
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
UpperCAmelCase : Optional[int] = logging.get_logger('transformers.models.speecht5')
UpperCAmelCase : Union[str, Any] = {
'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
UpperCAmelCase : List[Any] = {
'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
UpperCAmelCase : Optional[int] = {
'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
UpperCAmelCase : str = {
'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
UpperCAmelCase : str = {
'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
UpperCAmelCase : Dict = {
'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
UpperCAmelCase : str = {
'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
UpperCAmelCase : Tuple = {
'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'encoder.version',
'encoder.layers.*.norm_k.weight',
'encoder.layers.*.norm_k.bias',
'decoder.version',
'decoder.layers.*.norm_k.weight',
'decoder.layers.*.norm_k.bias',
'decoder.pos_emb.pe_k',
'speech_encoder_prenet.embed_positions._float_tensor',
'text_decoder_prenet.embed_positions._float_tensor',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'speech_decoder_prenet.*',
'speech_decoder_postnet.*',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'encoder.proj',
'speech_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign `value` to the (sub)module of `hf_pointer` addressed by the dotted `key`."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}'
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')
def should_ignore(name, ignore_keys):
    """Return True if `name` matches any pattern in `ignore_keys` ('.*' acts as a wildcard)."""
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
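# A few illustrative (hypothetical) inputs for should_ignore, showing its three match modes:
#   should_ignore("text_encoder_prenet.encoder_prenet.0", ["text_encoder_prenet.*"])       -> True  (prefix wildcard)
#   should_ignore("encoder.layers.3.norm_k.weight", ["encoder.layers.*.norm_k.weight"])    -> True  (infix wildcard)
#   should_ignore("encoder.version", ["encoder.version"])                                  -> True  (plain substring)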
def recursively_load_weights(fairseq_dict, hf_model, task):
    """Copy every fairseq parameter into the matching position of the HF model for the given task."""
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        mapping = MAPPING_S2T
        ignore_keys = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        mapping = MAPPING_T2S
        ignore_keys = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        mapping = MAPPING_S2S
        ignore_keys = IGNORE_KEYS_S2S
    else:
        raise ValueError(f'Unsupported task: {task}')
    for name, value in fairseq_dict.items():
        if should_ignore(name, ignore_keys):
            logger.info(f'{name} was ignored')
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in mapping.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one fairseq convolutional feature-encoder parameter into the HF feature extractor."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None,
):
    """Build the HF SpeechT5 model for `task`, load the fairseq weights, and save (optionally push)."""
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f'Unknown task name: {task}')
    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])
    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)
    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--task',
default='s2t',
type=str,
help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
    convert_speecht5_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
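# Example invocation of the converter above (script name and all paths are placeholders,
# not real files; flags match the argparse definitions):
#   python <this_script>.py --task t2s --checkpoint_path ./speecht5_tts.pt \
#       --vocab_path ./spm_char.model --pytorch_dump_folder_path ./speecht5_tts_hf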
"""simple docstring"""
def find_min(arr: list) -> int:
    """Return the minimum difference between the sums of two subsets of `arr` (partition DP)."""
    n = len(arr)
    s = sum(arr)
    # dp[i][j] is True when some subset of the first i elements sums to j
    dp = [[False for _ in range(s + 1)] for _ in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = True
    for j in range(1, s + 1):
        dp[0][j] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    diff = s
    for j in range(s // 2, -1, -1):
        if dp[n][j]:
            diff = s - 2 * j
            break
    return diff
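# Worked example (assuming the function above): find_min([1, 6, 11, 5]) == 1,
# since {1, 5, 6} sums to 12 and {11} sums to 11, giving |12 - 11| = 1.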
"""simple docstring"""
def set_bit(number: int, position: int) -> int:
    """Return `number` with the bit at `position` set to 1."""
    return number | (1 << position)
def clear_bit(number: int, position: int) -> int:
    """Return `number` with the bit at `position` cleared to 0."""
    return number & ~(1 << position)
def flip_bit(number: int, position: int) -> int:
    """Return `number` with the bit at `position` flipped."""
    return number ^ (1 << position)
def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` is 1."""
    return ((number >> position) & 1) == 1
def get_bit(number: int, position: int) -> int:
    """Return the bit value (0 or 1) at `position`."""
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
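# Worked examples for the helpers above (values checked by hand):
#   set_bit(0b1101, 1)    == 0b1111 == 15
#   clear_bit(0b1111, 2)  == 0b1011 == 11
#   flip_bit(0b1101, 1)   == 0b1111 == 15
#   is_bit_set(0b1010, 1) is True;  is_bit_set(0b1010, 0) is False
#   get_bit(0b1010, 1)    == 1;     get_bit(0b1010, 0) == 0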
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCamelCase__ ( BaseImageProcessor ):
"""simple docstring"""
__a = ["""pixel_values"""]
def __init__( self : Tuple , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : bool = True , UpperCamelCase : Union[int, float] = 1 / 255 , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = True , **UpperCamelCase : str , ):
'''simple docstring'''
super().__init__(**UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = size if size is not None else {"""shortest_edge""": 224}
__UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase )
__UpperCAmelCase : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
__UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase , param_name="""crop_size""" )
__UpperCAmelCase : int = do_resize
__UpperCAmelCase : Tuple = size
__UpperCAmelCase : Optional[Any] = resample
__UpperCAmelCase : Any = do_center_crop
__UpperCAmelCase : int = crop_size
__UpperCAmelCase : Optional[int] = do_rescale
__UpperCAmelCase : List[Any] = rescale_factor
__UpperCAmelCase : Tuple = do_normalize
__UpperCAmelCase : Any = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__UpperCAmelCase : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
__UpperCAmelCase : List[Any] = do_convert_rgb
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : List[Any] , ):
'''simple docstring'''
__UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
__UpperCAmelCase : int = get_resize_output_image_size(UpperCamelCase , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase )
return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase )
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Dict , ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = get_size_dict(UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase , **UpperCamelCase )
def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[int, float] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ):
'''simple docstring'''
return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase )
def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ):
'''simple docstring'''
return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase )
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : ImageInput , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = None , UpperCamelCase : bool = None , UpperCamelCase : int = None , UpperCamelCase : bool = None , UpperCamelCase : float = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase : Any , ):
'''simple docstring'''
__UpperCAmelCase : str = do_resize if do_resize is not None else self.do_resize
__UpperCAmelCase : Dict = size if size is not None else self.size
__UpperCAmelCase : Optional[Any] = get_size_dict(UpperCamelCase , param_name="""size""" , default_to_square=UpperCamelCase )
__UpperCAmelCase : Dict = resample if resample is not None else self.resample
__UpperCAmelCase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
__UpperCAmelCase : str = crop_size if crop_size is not None else self.crop_size
__UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , param_name="""crop_size""" , default_to_square=UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
__UpperCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCAmelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
__UpperCAmelCase : Any = image_mean if image_mean is not None else self.image_mean
__UpperCAmelCase : Any = image_std if image_std is not None else self.image_std
__UpperCAmelCase : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__UpperCAmelCase : List[str] = make_list_of_images(UpperCamelCase )
if not valid_images(UpperCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__UpperCAmelCase : int = [convert_to_rgb(UpperCamelCase ) for image in images]
# All transformations expect numpy arrays.
__UpperCAmelCase : Tuple = [to_numpy_array(UpperCamelCase ) for image in images]
if do_resize:
__UpperCAmelCase : Optional[int] = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images]
if do_center_crop:
__UpperCAmelCase : int = [self.center_crop(image=UpperCamelCase , size=UpperCamelCase ) for image in images]
if do_rescale:
__UpperCAmelCase : Dict = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images]
if do_normalize:
__UpperCAmelCase : Optional[Any] = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images]
__UpperCAmelCase : Any = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images]
__UpperCAmelCase : Any = {"""pixel_values""": images}
return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
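# A minimal usage sketch for the image processor above (hedged: the class keeps its obfuscated
# name here, `example_image` is a hypothetical PIL image, and the entry point is named
# `preprocess` in the upstream transformers class):
#   processor = <ImageProcessorClass>()
#   batch = processor.preprocess(images=[example_image], return_tensors="np")
#   batch["pixel_values"].shape  ->  (1, 3, 224, 224) with the default 224x224 center crop
# Transformations run in a fixed order: convert-to-RGB, resize (shortest edge), center-crop,
# rescale by 1/255, normalize with the OPENAI_CLIP mean/std, then channels-first layout.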
"""simple docstring"""
import re
def indian_phone_validator(phone: str) -> bool:
    """Validate an Indian phone number such as '+918827897895'."""
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
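# Examples of what the pattern accepts (checked against the regex above):
#   indian_phone_validator('+918827897895')  -> True   (optional '+91' prefix)
#   indian_phone_validator('9876543210')     -> True   (bare 10-digit number starting with 7/8/9)
#   indian_phone_validator('1234567890')     -> False  (first digit must be 7, 8 or 9)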
"""simple docstring"""
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate sum(c * x**i) directly from the coefficient sequence."""
    return sum(c * (x**i) for i, c in enumerate(poly))
def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's rule (one multiply-add per coefficient)."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
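# Worked check for the demo values above: with poly = (0.0, 0.0, 5.0, 9.3, 7.0) and x = 10.0,
# the value is 5*10**2 + 9.3*10**3 + 7*10**4 = 500 + 9300 + 70000 = 79800.0 for both functions.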
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor( ProcessorMixin ):
    """Wraps an OwlViT image processor and a CLIP tokenizer into a single processor."""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : List[Any] , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Tuple=None , **UpperCamelCase : str ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , UpperCamelCase , )
__UpperCAmelCase : Optional[int] = kwargs.pop("""feature_extractor""" )
__UpperCAmelCase : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(UpperCamelCase , UpperCamelCase )
def __call__( self : Union[str, Any] , UpperCamelCase : Optional[int]=None , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Tuple="max_length" , UpperCamelCase : str="np" , **UpperCamelCase : List[Any] ):
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
"""You have to specify at least one text or query image or image. All three cannot be none.""" )
if text is not None:
if isinstance(UpperCamelCase , UpperCamelCase ) or (isinstance(UpperCamelCase , UpperCamelCase ) and not isinstance(text[0] , UpperCamelCase )):
__UpperCAmelCase : Tuple = [self.tokenizer(UpperCamelCase , padding=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )]
elif isinstance(UpperCamelCase , UpperCamelCase ) and isinstance(text[0] , UpperCamelCase ):
__UpperCAmelCase : Optional[int] = []
# Maximum number of queries across batch
__UpperCAmelCase : List[str] = max([len(UpperCamelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(UpperCamelCase ) != max_num_queries:
__UpperCAmelCase : Optional[int] = t + [""" """] * (max_num_queries - len(UpperCamelCase ))
__UpperCAmelCase : Tuple = self.tokenizer(UpperCamelCase , padding=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
encodings.append(UpperCamelCase )
else:
raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" )
if return_tensors == "np":
__UpperCAmelCase : int = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
__UpperCAmelCase : Tuple = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__UpperCAmelCase : Any = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
__UpperCAmelCase : List[str] = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__UpperCAmelCase : List[Any] = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 )
__UpperCAmelCase : Tuple = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__UpperCAmelCase : Any = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
__UpperCAmelCase : int = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
else:
raise ValueError("""Target return tensor type could not be returned""" )
__UpperCAmelCase : int = BatchEncoding()
__UpperCAmelCase : Optional[int] = input_ids
__UpperCAmelCase : Dict = attention_mask
if query_images is not None:
__UpperCAmelCase : Tuple = BatchEncoding()
__UpperCAmelCase : Tuple = self.image_processor(
UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ).pixel_values
__UpperCAmelCase : List[str] = query_pixel_values
if images is not None:
__UpperCAmelCase : str = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if text is not None and images is not None:
__UpperCAmelCase : Optional[Any] = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__UpperCAmelCase : Dict = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase )
def lowerCamelCase__ ( self : Union[str, Any] , *UpperCamelCase : Any , **UpperCamelCase : str ):
'''simple docstring'''
return self.image_processor.post_process(*UpperCamelCase , **UpperCamelCase )
def lowerCamelCase__ ( self : List[Any] , *UpperCamelCase : List[Any] , **UpperCamelCase : Dict ):
'''simple docstring'''
return self.image_processor.post_process_object_detection(*UpperCamelCase , **UpperCamelCase )
def lowerCamelCase__ ( self : Tuple , *UpperCamelCase : Tuple , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*UpperCamelCase , **UpperCamelCase )
def lowerCamelCase__ ( self : int , *UpperCamelCase : int , **UpperCamelCase : Dict ):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )
def lowerCamelCase__ ( self : int , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Any ):
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )
@property
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , UpperCamelCase , )
return self.image_processor_class
@property
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , UpperCamelCase , )
return self.image_processor
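# A hedged usage sketch for the processor above (`imgs`, `tokenizer` are hypothetical inputs):
#   processor = OwlViTProcessor(image_processor=OwlViTImageProcessor(), tokenizer=tokenizer)
#   inputs = processor(text=[["a cat", "a dog"]], images=imgs, return_tensors="np")
# Each sample's query list is right-padded with " " strings to the longest list in the batch,
# then the per-sample encodings are concatenated along axis 0 (see the __call__ body above).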
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
def __init__( self : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Dict=True , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Union[str, Any]=32 , UpperCamelCase : Union[str, Any]=5 , UpperCamelCase : Any=4 , UpperCamelCase : Tuple=37 , UpperCamelCase : Any=0.1 , UpperCamelCase : Any=0.1 , UpperCamelCase : Union[str, Any]=20 , UpperCamelCase : List[str]=2 , UpperCamelCase : int=1 , UpperCamelCase : Optional[Any]=0 , ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : str = batch_size
__UpperCAmelCase : Optional[Any] = seq_length
__UpperCAmelCase : Dict = is_training
__UpperCAmelCase : Dict = use_labels
__UpperCAmelCase : List[Any] = vocab_size
__UpperCAmelCase : Dict = hidden_size
__UpperCAmelCase : Optional[Any] = num_hidden_layers
__UpperCAmelCase : Union[str, Any] = num_attention_heads
__UpperCAmelCase : List[Any] = intermediate_size
__UpperCAmelCase : Union[str, Any] = hidden_dropout_prob
__UpperCAmelCase : List[str] = attention_probs_dropout_prob
__UpperCAmelCase : List[Any] = max_position_embeddings
__UpperCAmelCase : Any = eos_token_id
__UpperCAmelCase : Optional[int] = pad_token_id
__UpperCAmelCase : List[str] = bos_token_id
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
__UpperCAmelCase : str = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
__UpperCAmelCase : Union[str, Any] = np.concatenate([input_ids, eos_tensor] , axis=1 )
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : Any = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__UpperCAmelCase : Any = prepare_pegasus_inputs_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return config, inputs_dict
def lowerCamelCase__ ( self : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = 20
__UpperCAmelCase : Tuple = model_class_name(UpperCamelCase )
__UpperCAmelCase : List[Any] = model.encode(inputs_dict["""input_ids"""] )
__UpperCAmelCase ,__UpperCAmelCase : int = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__UpperCAmelCase : Tuple = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : Any = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
__UpperCAmelCase : Optional[int] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCAmelCase : Union[str, Any] = model.decode(
decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , )
__UpperCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__UpperCAmelCase : Tuple = model.decode(
decoder_input_ids[:, -1:] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase , )
__UpperCAmelCase : Dict = model.decode(UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def lowerCamelCase__ ( self : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = 20
__UpperCAmelCase : int = model_class_name(UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = model.encode(inputs_dict["""input_ids"""] )
__UpperCAmelCase ,__UpperCAmelCase : Dict = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__UpperCAmelCase : int = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__UpperCAmelCase : int = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : List[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCAmelCase : List[str] = model.decode(
decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , )
__UpperCAmelCase : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__UpperCAmelCase : Optional[int] = model.decode(
decoder_input_ids[:, -1:] , UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase , decoder_position_ids=UpperCamelCase , )
__UpperCAmelCase : Union[str, Any] = model.decode(UpperCamelCase , UpperCamelCase , decoder_attention_mask=UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    """Build default attention masks for the encoder/decoder inputs when none are given."""
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class FlaxPegasusModelTest( FlaxModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = FlaxPegasusModelTester(self )
__UpperCAmelCase : List[str] = ConfigTester(self , config_class=UpperCamelCase )
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase ,__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase ,__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCAmelCase : Tuple = self._prepare_for_class(UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : Dict = model_class(UpperCamelCase )
@jax.jit
def encode_jitted(UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any]=None , **UpperCamelCase : List[str] ):
return model.encode(input_ids=UpperCamelCase , attention_mask=UpperCamelCase )
with self.subTest("""JIT Enabled""" ):
__UpperCAmelCase : Tuple = encode_jitted(**UpperCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__UpperCAmelCase : Optional[int] = encode_jitted(**UpperCamelCase ).to_tuple()
self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) )
for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase ,__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCAmelCase : int = model_class(UpperCamelCase )
__UpperCAmelCase : int = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
__UpperCAmelCase : Any = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] ):
return model.decode(
decoder_input_ids=UpperCamelCase , decoder_attention_mask=UpperCamelCase , encoder_outputs=UpperCamelCase , )
with self.subTest("""JIT Enabled""" ):
__UpperCAmelCase : Union[str, Any] = decode_jitted(**UpperCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__UpperCAmelCase : str = decode_jitted(**UpperCamelCase ).to_tuple()
self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) )
for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCAmelCase : Optional[Any] = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=UpperCamelCase )
__UpperCAmelCase : Optional[int] = np.ones((1, 1) )
__UpperCAmelCase : List[str] = model(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
@slow
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
__UpperCAmelCase : Union[str, Any] = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
__UpperCAmelCase : List[Any] = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
__UpperCAmelCase : List[str] = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
__UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""np""" , truncation=UpperCamelCase , max_length=512 , padding=UpperCamelCase )
__UpperCAmelCase : int = model.generate(**UpperCamelCase , num_beams=2 ).sequences
__UpperCAmelCase : str = tokenizer.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase )
assert tgt_text == decoded
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_trajectory_transformer'] = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
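# Note on the pattern above (the standard transformers lazy-import module): submodules are only
# imported on first attribute access. A minimal sketch of the same idea, with made-up names:
#
#   _import_structure = {"configuration_foo": ["FooConfig"]}
#   if TYPE_CHECKING:
#       from .configuration_foo import FooConfig
#   else:
#       import sys
#       sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)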
"""simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
model_classes = {
    'b0': efficientnet.EfficientNetB0,
    'b1': efficientnet.EfficientNetB1,
    'b2': efficientnet.EfficientNetB2,
    'b3': efficientnet.EfficientNetB3,
    'b4': efficientnet.EfficientNetB4,
    'b5': efficientnet.EfficientNetB5,
    'b6': efficientnet.EfficientNetB6,
    'b7': efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'b0': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 224,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 240,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 1408,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 260,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 1536,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 300,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 1792,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 380,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 2048,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 456,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 2304,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 528,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 2560,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 600,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def get_efficientnet_config(model_name):
    """Build an EfficientNetConfig for `model_name` plus the ImageNet-1k label maps."""
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    """Download the standard COCO test image used for sanity-checking conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    """Create an EfficientNetImageProcessor matching the model's expected input size."""
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47_853_944, 0.4_732_864, 0.47_434_163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    """Build the TF-name -> HF-name mapping for every parameter of the original model."""
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}
    rename_keys = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
__UpperCAmelCase : List[str] = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
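# Example of one resulting entry (taken from the stem rules appended above):
#   key_mapping["stem_conv/kernel:0"] == "efficientnet.embeddings.convolution.weight"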
def replace_params(hf_params, tf_params, key_mapping):
    """Copy TF weights into the HF state dict, permuting conv kernels to PyTorch layout."""
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """Convert a TF EfficientNet checkpoint to HF, verify the logits match, and optionally save/push."""
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())
    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model and image processor to hub
        print(f'Pushing converted {model_name} to the hub...')
        model_name = f'efficientnet-{model_name}'
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
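# Example invocation (script name and output path are placeholders; flags match the argparse
# definitions above):
#   python <this_script>.py --model_name b0 --pytorch_dump_folder_path hf_model --save_model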
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase : int = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Any = ['MaskFormerFeatureExtractor']
UpperCAmelCase : Any = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[int] = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
UpperCAmelCase : List[Any] = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
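# Usage note (editorial sketch, not part of the original module): with
# `_LazyModule`, the torch/vision submodules declared above are imported only
# when one of their attributes is first accessed, e.g.
#
#   from transformers import MaskFormerConfig  # triggers the real submodule import
#   config = MaskFormerConfig()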
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCamelCase__ ( metaclass=A ):
"""simple docstring"""
__a = ["""keras_nlp"""]
def __init__( self : str , *UpperCamelCase : List[Any] , **UpperCamelCase : Dict ):
'''simple docstring'''
requires_backends(self , ["""keras_nlp"""] )
"""simple docstring"""
def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : list[list[int]] ) -> int:
'''simple docstring'''
def update_area_of_max_square(_UpperCamelCase : int , _UpperCamelCase : int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
        __UpperCAmelCase : Optional[int] = update_area_of_max_square(row , col + 1 )
        __UpperCAmelCase : int = update_area_of_max_square(row + 1 , col + 1 )
        __UpperCAmelCase : Optional[Any] = update_area_of_max_square(row + 1 , col )
        if mat[row][col]:
            __UpperCAmelCase : List[Any] = 1 + min([right, diagonal, down] )
            __UpperCAmelCase : Any = max(largest_square_area[0] , sub_problem_sol )
return sub_problem_sol
else:
return 0
__UpperCAmelCase : Optional[int] = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
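# Editorial note: the plain recursion above branches three ways per cell and
# revisits the same (row, col) states, so it is exponential in the grid size.
# The memoized variant below caches each state in `dp_array`, reducing the cost
# to O(rows * cols).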
def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : list[list[int]] ) -> int:
'''simple docstring'''
def update_area_of_max_square_using_dp_array(
_UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
        __UpperCAmelCase : List[str] = update_area_of_max_square_using_dp_array(row , col + 1 , dp_array )
        __UpperCAmelCase : Optional[Any] = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , dp_array )
        __UpperCAmelCase : str = update_area_of_max_square_using_dp_array(row + 1 , col , dp_array )
        if mat[row][col]:
            __UpperCAmelCase : List[Any] = 1 + min([right, diagonal, down] )
            __UpperCAmelCase : Optional[Any] = max(largest_square_area[0] , sub_problem_sol )
__UpperCAmelCase : Tuple = sub_problem_sol
return sub_problem_sol
else:
return 0
__UpperCAmelCase : Dict = [0]
    __UpperCAmelCase : Any = [[-1] * cols for _ in range(rows )]
    update_area_of_max_square_using_dp_array(0 , 0 , dp_array )
return largest_square_area[0]
def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : list[list[int]] ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[Any] = [[0] * (cols + 1) for _ in range(rows + 1 )]
__UpperCAmelCase : Any = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__UpperCAmelCase : Union[str, Any] = dp_array[row][col + 1]
__UpperCAmelCase : Optional[int] = dp_array[row + 1][col + 1]
__UpperCAmelCase : Optional[Any] = dp_array[row + 1][col]
if mat[row][col] == 1:
                __UpperCAmelCase : Optional[int] = 1 + min(right , diagonal , down )
                __UpperCAmelCase : Optional[Any] = max(dp_array[row][col] , largest_square_area )
else:
__UpperCAmelCase : int = 0
return largest_square_area
def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : list[list[int]] ) -> int:
'''simple docstring'''
__UpperCAmelCase : int = [0] * (cols + 1)
__UpperCAmelCase : Tuple = [0] * (cols + 1)
__UpperCAmelCase : int = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__UpperCAmelCase : int = current_row[col + 1]
__UpperCAmelCase : List[Any] = next_row[col + 1]
__UpperCAmelCase : str = next_row[col]
if mat[row][col] == 1:
                __UpperCAmelCase : Any = 1 + min(right , diagonal , down )
                __UpperCAmelCase : Union[str, Any] = max(current_row[col] , largest_square_area )
else:
__UpperCAmelCase : Tuple = 0
__UpperCAmelCase : Any = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
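    # Worked example (editorial addition) for the recurrence
    # dp[row][col] = 1 + min(right, diagonal, down) when mat[row][col] == 1:
    # with mat = [[1, 1], [1, 1]], the three cells along the bottom/right edge
    # evaluate to 1 and the top-left cell gets 1 + min(1, 1, 1) = 2, so the
    # call above prints 2 (the side length of the largest all-ones square).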
"""simple docstring"""
UpperCAmelCase : Dict = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def lowerCamelCase ( _UpperCamelCase : bytes ) -> bytes:
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
__UpperCAmelCase : Any = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(_UpperCamelCase )
__UpperCAmelCase : str = """""".join(bin(_UpperCamelCase )[2:].zfill(8 ) for byte in data )
__UpperCAmelCase : int = len(_UpperCamelCase ) % 6 != 0
if padding_needed:
# The padding that will be added later
__UpperCAmelCase : Dict = b"""=""" * ((6 - len(_UpperCamelCase ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(_UpperCamelCase ) % 6)
else:
__UpperCAmelCase : List[str] = b""""""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
            for index in range(0 , len(binary_stream ) , 6 ) ).encode()
+ padding
)
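# Worked example (editorial addition): encoding b"AB". "A" -> 01000001 and
# "B" -> 01000010 form a 16-bit stream; since 16 % 6 != 0, one "=" is produced
# ((6 - 16 % 6) // 2 == 1) and two zero bits are appended. The 6-bit groups
# 010000, 010100, 001000 index B64_CHARSET at 16, 20, 8 -> "QUI", giving b"QUI=".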
def lowerCamelCase ( _UpperCamelCase : str ) -> bytes:
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ) and not isinstance(_UpperCamelCase , _UpperCamelCase ):
__UpperCAmelCase : Tuple = (
"""argument should be a bytes-like object or ASCII string, """
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(_UpperCamelCase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_UpperCamelCase , _UpperCamelCase ):
try:
__UpperCAmelCase : Optional[Any] = encoded_data.decode("""utf-8""" )
except UnicodeDecodeError:
raise ValueError("""base64 encoded data should only contain ASCII characters""" )
__UpperCAmelCase : str = encoded_data.count("""=""" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(_UpperCamelCase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
__UpperCAmelCase : List[str] = encoded_data[:-padding]
__UpperCAmelCase : int = """""".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
__UpperCAmelCase : Optional[Any] = """""".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )
__UpperCAmelCase : List[Any] = [
int(binary_stream[index : index + 8] , 2 )
        for index in range(0 , len(binary_stream ) , 8 )
]
return bytes(_UpperCamelCase )
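# Worked example (editorial addition): decoding "QUI=". One "=" means two bits of
# padding. "Q", "U", "I" map to indices 16, 20, 8 -> 010000 010100 001000; after
# dropping the final 2 bits, the remaining 16 bits split into 01000001 01000010,
# i.e. b"AB", the inverse of the encoding example above.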
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
def lowerCamelCase ( _UpperCamelCase : Optional[int] ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase )
__UpperCAmelCase : List[Any] = sum(_UpperCamelCase )
__UpperCAmelCase : Optional[int] = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
__UpperCAmelCase : Any = True
for i in range(1 , s + 1 ):
__UpperCAmelCase : List[Any] = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
            __UpperCAmelCase : Optional[int] = dp[i - 1][j]
if arr[i - 1] <= j:
__UpperCAmelCase : Union[str, Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
__UpperCAmelCase : Optional[int] = s - 2 * j
break
return diff
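# Worked example (editorial addition): for arr = [1, 6, 11, 5], s = 23. Scanning
# j from s // 2 = 11 downwards, dp[4][11] is True (e.g. the subset {6, 5}), so
# the minimum partition difference is 23 - 2 * 11 = 1.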
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
UpperCAmelCase : str = logging.get_logger(__name__)
class lowerCamelCase__ ( A ):
"""simple docstring"""
def __init__( self : Optional[Any] , *UpperCamelCase : str , **UpperCamelCase : List[str] ):
'''simple docstring'''
warnings.warn(
"""The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use ChineseCLIPImageProcessor instead.""" , UpperCamelCase , )
super().__init__(*UpperCamelCase , **UpperCamelCase )
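# Migration sketch (editorial, not part of the original module): call sites can
# switch to the replacement class without other changes, e.g.
#
#   from transformers import ChineseCLIPImageProcessor
#   image_processor = ChineseCLIPImageProcessor()  # instead of ChineseCLIPFeatureExtractor()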
"""simple docstring"""
UpperCAmelCase : Dict = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
UpperCAmelCase : List[str] = concatenate_datasets
UpperCAmelCase : Optional[Any] = DownloadConfig
UpperCAmelCase : Tuple = DownloadManager
UpperCAmelCase : List[str] = DownloadMode
UpperCAmelCase : Dict = DownloadConfig
UpperCAmelCase : Tuple = DownloadMode
UpperCAmelCase : List[Any] = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
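# Usage sketch (editorial, not part of the original module): once the package
# imports successfully, the re-exported entry points live at the root, e.g.
#
#   import datasets
#   ds = datasets.load_dataset("json", data_files="train.jsonl")  # "train.jsonl" is a placeholder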
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase__ ( A , unittest.TestCase ):
"""simple docstring"""
__a = LEDTokenizer
__a = LEDTokenizerFast
__a = True
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
super().setUp()
__UpperCAmelCase : Tuple = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
__UpperCAmelCase : str = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
__UpperCAmelCase : Union[str, Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""}
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCamelCase ) )
def lowerCamelCase__ ( self : Tuple , **UpperCamelCase : int ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase )
def lowerCamelCase__ ( self : Optional[int] , **UpperCamelCase : List[str] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase )
def lowerCamelCase__ ( self : str , UpperCamelCase : Any ):
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__UpperCAmelCase : Union[str, Any] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : Any = tokenizer(UpperCamelCase , max_length=len(UpperCamelCase ) , padding=UpperCamelCase , return_tensors="""pt""" )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
__UpperCAmelCase : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCamelCase , UpperCamelCase )
@require_torch
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : Optional[int] = tokenizer(UpperCamelCase , padding=UpperCamelCase , return_tensors="""pt""" )
self.assertIn("""input_ids""" , UpperCamelCase )
self.assertIn("""attention_mask""" , UpperCamelCase )
self.assertNotIn("""labels""" , UpperCamelCase )
self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase )
@require_torch
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : Optional[Any] = tokenizer(text_target=UpperCamelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
@require_torch
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : str = tokenizer(
["""I am a small frog""" * 1_024, """I am a small frog"""] , padding=UpperCamelCase , truncation=UpperCamelCase , return_tensors="""pt""" )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
self.assertEqual(batch.input_ids.shape , (2, 5_122) )
@require_torch
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = ["""A long paragraph for summarization."""]
__UpperCAmelCase : int = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""pt""" )
__UpperCAmelCase : Tuple = tokenizer(text_target=UpperCamelCase , return_tensors="""pt""" )
__UpperCAmelCase : Optional[Any] = inputs["""input_ids"""]
__UpperCAmelCase : List[str] = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : Any = ["""Summary of the text.""", """Another summary."""]
__UpperCAmelCase : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
__UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , padding=UpperCamelCase )
            __UpperCAmelCase : str = [[0] * len(x ) for x in encoded_output["""input_ids"""]]
__UpperCAmelCase : List[Any] = tokenizer.pad(UpperCamelCase )
self.assertSequenceEqual(outputs["""global_attention_mask"""] , UpperCamelCase )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
__UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
__UpperCAmelCase : Any = """A, <mask> AllenNLP sentence."""
__UpperCAmelCase : Dict = tokenizer_r.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase )
__UpperCAmelCase : List[Any] = tokenizer_p.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
__UpperCAmelCase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
__UpperCAmelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( A , A , unittest.TestCase ):
"""simple docstring"""
__a = StableDiffusionXLImgaImgPipeline
__a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
__a = PipelineTesterMixin.required_optional_params - {"""latents"""}
__a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__a = IMAGE_TO_IMAGE_IMAGE_PARAMS
__a = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCAmelCase : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
__UpperCAmelCase : Optional[Any] = EulerDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
torch.manual_seed(0 )
__UpperCAmelCase : Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__UpperCAmelCase : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=32 , )
__UpperCAmelCase : str = CLIPTextModel(UpperCamelCase )
__UpperCAmelCase : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=UpperCamelCase )
__UpperCAmelCase : List[str] = CLIPTextModelWithProjection(UpperCamelCase )
__UpperCAmelCase : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=UpperCamelCase )
__UpperCAmelCase : Optional[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def lowerCamelCase__ ( self : List[str] , UpperCamelCase : Optional[Any] , UpperCamelCase : Any=0 ):
'''simple docstring'''
__UpperCAmelCase : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
__UpperCAmelCase : int = image / 2 + 0.5
if str(UpperCamelCase ).startswith("""mps""" ):
__UpperCAmelCase : Union[str, Any] = torch.manual_seed(UpperCamelCase )
else:
__UpperCAmelCase : Union[str, Any] = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
__UpperCAmelCase : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.75,
}
return inputs
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : List[str] = self.get_dummy_components()
__UpperCAmelCase : Optional[Any] = StableDiffusionXLImgaImgPipeline(**UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = sd_pipe.to(UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase )
__UpperCAmelCase : List[str] = self.get_dummy_inputs(UpperCamelCase )
__UpperCAmelCase : int = sd_pipe(**UpperCamelCase ).images
__UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__UpperCAmelCase : str = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = self.get_dummy_components()
__UpperCAmelCase : Optional[int] = StableDiffusionXLImgaImgPipeline(**UpperCamelCase )
__UpperCAmelCase : str = sd_pipe.to(UpperCamelCase )
__UpperCAmelCase : List[str] = sd_pipe.to(UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase )
# forward without prompt embeds
__UpperCAmelCase : List[str] = self.get_dummy_inputs(UpperCamelCase )
__UpperCAmelCase : Dict = 3 * ["""this is a negative prompt"""]
__UpperCAmelCase : Optional[Any] = negative_prompt
__UpperCAmelCase : str = 3 * [inputs["""prompt"""]]
__UpperCAmelCase : Tuple = sd_pipe(**UpperCamelCase )
__UpperCAmelCase : List[str] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
__UpperCAmelCase : List[str] = self.get_dummy_inputs(UpperCamelCase )
__UpperCAmelCase : Optional[int] = 3 * ["""this is a negative prompt"""]
__UpperCAmelCase : int = 3 * [inputs.pop("""prompt""" )]
        (
            __UpperCAmelCase ,
            __UpperCAmelCase ,
            __UpperCAmelCase ,
            __UpperCAmelCase ,
        ) : List[str] = sd_pipe.encode_prompt(UpperCamelCase , negative_prompt=UpperCamelCase )
__UpperCAmelCase : Optional[Any] = sd_pipe(
**UpperCamelCase , prompt_embeds=UpperCamelCase , negative_prompt_embeds=UpperCamelCase , pooled_prompt_embeds=UpperCamelCase , negative_pooled_prompt_embeds=UpperCamelCase , )
__UpperCAmelCase : Optional[int] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : Tuple="cpu" , UpperCamelCase : Union[str, Any]=torch.floataa , UpperCamelCase : Union[str, Any]=0 ):
'''simple docstring'''
__UpperCAmelCase : str = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
__UpperCAmelCase : str = np.random.RandomState(UpperCamelCase ).standard_normal((1, 4, 64, 64) )
__UpperCAmelCase : List[str] = torch.from_numpy(UpperCamelCase ).to(device=UpperCamelCase , dtype=UpperCamelCase )
__UpperCAmelCase : Optional[Any] = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
__UpperCAmelCase : int = self.get_inputs(UpperCamelCase )
__UpperCAmelCase : Optional[Any] = pipe(**UpperCamelCase ).images
__UpperCAmelCase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__UpperCAmelCase : Optional[int] = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class lowerCamelCase__ :
"""simple docstring"""
def __init__( self : List[str] , UpperCamelCase : int , UpperCamelCase : List[Any]=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Optional[int]=True , UpperCamelCase : Optional[int]=True , UpperCamelCase : Dict=True , UpperCamelCase : List[Any]=True , UpperCamelCase : int=99 , UpperCamelCase : Any=[1, 1, 2] , UpperCamelCase : Optional[Any]=1 , UpperCamelCase : Optional[Any]=32 , UpperCamelCase : Optional[int]=4 , UpperCamelCase : Union[str, Any]=8 , UpperCamelCase : int=37 , UpperCamelCase : Optional[Any]="gelu_new" , UpperCamelCase : Any=0.1 , UpperCamelCase : int=0.1 , UpperCamelCase : int=0.0 , UpperCamelCase : Union[str, Any]=512 , UpperCamelCase : Any=3 , UpperCamelCase : Optional[int]=0.02 , UpperCamelCase : Union[str, Any]=3 , UpperCamelCase : Union[str, Any]=4 , UpperCamelCase : str=None , UpperCamelCase : Tuple=False , ):
'''simple docstring'''
__UpperCAmelCase : int = parent
__UpperCAmelCase : int = batch_size
__UpperCAmelCase : str = seq_length
__UpperCAmelCase : Optional[Any] = is_training
__UpperCAmelCase : Optional[Any] = use_input_mask
__UpperCAmelCase : Tuple = use_token_type_ids
__UpperCAmelCase : List[str] = use_labels
__UpperCAmelCase : Tuple = vocab_size
__UpperCAmelCase : Optional[int] = block_sizes
__UpperCAmelCase : Optional[Any] = num_decoder_layers
__UpperCAmelCase : Union[str, Any] = d_model
__UpperCAmelCase : Dict = n_head
__UpperCAmelCase : Optional[Any] = d_head
__UpperCAmelCase : Dict = d_inner
__UpperCAmelCase : Any = hidden_act
__UpperCAmelCase : Optional[Any] = hidden_dropout
__UpperCAmelCase : List[Any] = attention_dropout
__UpperCAmelCase : str = activation_dropout
__UpperCAmelCase : Union[str, Any] = max_position_embeddings
__UpperCAmelCase : List[Any] = type_vocab_size
__UpperCAmelCase : str = 2
__UpperCAmelCase : Optional[Any] = num_labels
__UpperCAmelCase : List[Any] = num_choices
__UpperCAmelCase : Any = scope
__UpperCAmelCase : Dict = initializer_std
# Used in the tests to check the size of the first attention layer
__UpperCAmelCase : Dict = n_head
# Used in the tests to check the size of the first hidden state
__UpperCAmelCase : Dict = self.d_model
# Used in the tests to check the number of output hidden states/attentions
__UpperCAmelCase : Dict = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
__UpperCAmelCase : List[Any] = self.num_hidden_layers + 2
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : List[str] = None
if self.use_input_mask:
__UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : int = None
if self.use_token_type_ids:
__UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase : List[Any] = None
__UpperCAmelCase : Dict = None
__UpperCAmelCase : Optional[Any] = None
if self.use_labels:
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase : str = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase__ ( self : Any , UpperCamelCase : Any , UpperCamelCase : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = TFFunnelModel(config=UpperCamelCase )
__UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__UpperCAmelCase : List[str] = model(UpperCamelCase )
__UpperCAmelCase : List[Any] = [input_ids, input_mask]
__UpperCAmelCase : Dict = model(UpperCamelCase )
__UpperCAmelCase : Tuple = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
__UpperCAmelCase : int = False
__UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
__UpperCAmelCase : Any = False
__UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase )
__UpperCAmelCase : List[str] = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Any , ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = TFFunnelBaseModel(config=UpperCamelCase )
__UpperCAmelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__UpperCAmelCase : Optional[Any] = model(UpperCamelCase )
__UpperCAmelCase : int = [input_ids, input_mask]
__UpperCAmelCase : int = model(UpperCamelCase )
__UpperCAmelCase : List[Any] = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
__UpperCAmelCase : int = False
__UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase )
__UpperCAmelCase : str = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] , ):
'''simple docstring'''
__UpperCAmelCase : Tuple = TFFunnelForPreTraining(config=UpperCamelCase )
__UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__UpperCAmelCase : int = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : Tuple , UpperCamelCase : int , ):
'''simple docstring'''
__UpperCAmelCase : int = TFFunnelForMaskedLM(config=UpperCamelCase )
__UpperCAmelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__UpperCAmelCase : Optional[Any] = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , ):
'''simple docstring'''
__UpperCAmelCase : Dict = self.num_labels
__UpperCAmelCase : Optional[Any] = TFFunnelForSequenceClassification(config=UpperCamelCase )
__UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__UpperCAmelCase : Tuple = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int , ):
'''simple docstring'''
__UpperCAmelCase : Dict = self.num_choices
__UpperCAmelCase : str = TFFunnelForMultipleChoice(config=UpperCamelCase )
__UpperCAmelCase : Optional[Any] = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase : str = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase : int = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase : List[str] = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
__UpperCAmelCase : int = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase__ ( self : List[str] , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : Any , ):
'''simple docstring'''
__UpperCAmelCase : int = self.num_labels
__UpperCAmelCase : str = TFFunnelForTokenClassification(config=UpperCamelCase )
__UpperCAmelCase : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__UpperCAmelCase : int = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self : str , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , ):
'''simple docstring'''
__UpperCAmelCase : Any = TFFunnelForQuestionAnswering(config=UpperCamelCase )
__UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__UpperCAmelCase : Any = model(UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
(
(
__UpperCAmelCase
) ,(
__UpperCAmelCase
) ,(
__UpperCAmelCase
) ,(
__UpperCAmelCase
) ,(
__UpperCAmelCase
) ,(
__UpperCAmelCase
) ,(
__UpperCAmelCase
) ,
) : Dict = config_and_inputs
__UpperCAmelCase : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class lowerCamelCase__ ( A , A , unittest.TestCase ):
"""simple docstring"""
__a = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
__a = (
{
"""feature-extraction""": (TFFunnelBaseModel, TFFunnelModel),
"""fill-mask""": TFFunnelForMaskedLM,
"""question-answering""": TFFunnelForQuestionAnswering,
"""text-classification""": TFFunnelForSequenceClassification,
"""token-classification""": TFFunnelForTokenClassification,
"""zero-shot""": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
__a = False
__a = False
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = TFFunnelModelTester(self )
__UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase )
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase )
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase )
@require_tf
class lowerCamelCase__ ( A , unittest.TestCase ):
"""simple docstring"""
__a = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
__a = False
__a = False
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : List[str] = TFFunnelModelTester(self , base=UpperCamelCase )
__UpperCAmelCase : List[Any] = ConfigTester(self , config_class=UpperCamelCase )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*UpperCamelCase )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase )
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase )
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Dict ) -> Any:
'''simple docstring'''
assert isinstance(_UpperCamelCase , _UpperCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] ) -> int:
'''simple docstring'''
__UpperCAmelCase : Tuple = tmp_path / """cache"""
__UpperCAmelCase : str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase : Union[str, Any] = JsonDatasetReader(_UpperCamelCase , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase ).read()
_check_json_dataset(_UpperCamelCase , _UpperCamelCase )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[int] ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : Any = tmp_path / """cache"""
__UpperCAmelCase : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__UpperCAmelCase : List[str] = features.copy() if features else default_expected_features
__UpperCAmelCase : List[str] = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase : Optional[int] = JsonDatasetReader(_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase ).read()
_check_json_dataset(_UpperCamelCase , _UpperCamelCase )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""},
] , )
def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple ) -> Any:
'''simple docstring'''
__UpperCAmelCase : Tuple = tmp_path / """cache"""
__UpperCAmelCase : Any = {"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""}
__UpperCAmelCase : List[Any] = features.copy() if features else default_expected_features
__UpperCAmelCase : Tuple = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase : Tuple = JsonDatasetReader(_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase ).read()
assert isinstance(_UpperCamelCase , _UpperCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : List[Any] ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : List[Any] = {"""col_2""": """int64""", """col_3""": """float64""", """col_1""": """string"""}
__UpperCAmelCase : str = features.copy()
__UpperCAmelCase : str = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase : Union[str, Any] = tmp_path / """cache"""
__UpperCAmelCase : Tuple = JsonDatasetReader(_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase ).read()
assert isinstance(_UpperCamelCase , _UpperCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : int , _UpperCamelCase : Tuple ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase : Any = tmp_path / """cache"""
__UpperCAmelCase : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__UpperCAmelCase : Optional[int] = JsonDatasetReader(_UpperCamelCase , cache_dir=_UpperCamelCase , split=_UpperCamelCase ).read()
_check_json_dataset(_UpperCamelCase , _UpperCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[Any] ) -> Tuple:
'''simple docstring'''
if issubclass(_UpperCamelCase , _UpperCamelCase ):
__UpperCAmelCase : int = jsonl_path
elif issubclass(_UpperCamelCase , _UpperCamelCase ):
__UpperCAmelCase : str = [jsonl_path]
__UpperCAmelCase : Any = tmp_path / """cache"""
__UpperCAmelCase : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__UpperCAmelCase : List[Any] = JsonDatasetReader(_UpperCamelCase , cache_dir=_UpperCamelCase ).read()
_check_json_dataset(_UpperCamelCase , _UpperCamelCase )
def lowerCamelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str=("train",) ) -> List[Any]:
'''simple docstring'''
assert isinstance(_UpperCamelCase , _UpperCamelCase )
for split in splits:
__UpperCAmelCase : List[Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : List[str] ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = tmp_path / """cache"""
__UpperCAmelCase : str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase : int = JsonDatasetReader({"""train""": jsonl_path} , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase ).read()
_check_json_datasetdict(_UpperCamelCase , _UpperCamelCase )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCamelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : Dict = tmp_path / """cache"""
__UpperCAmelCase : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__UpperCAmelCase : Optional[Any] = features.copy() if features else default_expected_features
__UpperCAmelCase : str = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase : List[str] = JsonDatasetReader({"""train""": jsonl_path} , features=_UpperCamelCase , cache_dir=_UpperCamelCase ).read()
_check_json_datasetdict(_UpperCamelCase , _UpperCamelCase )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : int ) -> Optional[int]:
'''simple docstring'''
if split:
__UpperCAmelCase : Dict = {split: jsonl_path}
else:
__UpperCAmelCase : Dict = """train"""
__UpperCAmelCase : str = {"""train""": jsonl_path, """test""": jsonl_path}
__UpperCAmelCase : Optional[int] = tmp_path / """cache"""
__UpperCAmelCase : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__UpperCAmelCase : Tuple = JsonDatasetReader(_UpperCamelCase , cache_dir=_UpperCamelCase ).read()
_check_json_datasetdict(_UpperCamelCase , _UpperCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCamelCase ( _UpperCamelCase : Tuple ) -> int:
'''simple docstring'''
return json.load(_UpperCamelCase )
def lowerCamelCase ( _UpperCamelCase : Tuple ) -> int:
'''simple docstring'''
    return [json.loads(line ) for line in buffer]
class lowerCamelCase__ :
"""simple docstring"""
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Any ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase ).write()
buffer.seek(0 )
__UpperCAmelCase : Optional[Any] = load_json_function(UpperCamelCase )
assert isinstance(UpperCamelCase , UpperCamelCase )
assert isinstance(exported_content[0] , UpperCamelCase )
assert len(UpperCamelCase ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : Any ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , orient=UpperCamelCase ).write()
buffer.seek(0 )
__UpperCAmelCase : int = load_json(UpperCamelCase )
assert isinstance(UpperCamelCase , UpperCamelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase ) == 10
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def lowerCamelCase__ ( self : int , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , num_proc=2 ).write()
buffer.seek(0 )
__UpperCAmelCase : Optional[Any] = load_json_function(UpperCamelCase )
assert isinstance(UpperCamelCase , UpperCamelCase )
assert isinstance(exported_content[0] , UpperCamelCase )
assert len(UpperCamelCase ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : int , UpperCamelCase : Dict ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , orient=UpperCamelCase , num_proc=2 ).write()
buffer.seek(0 )
__UpperCAmelCase : int = load_json(UpperCamelCase )
assert isinstance(UpperCamelCase , UpperCamelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase ) == 10
    def test_dataset_to_json_invalid_num_proc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)
@pytest.mark.parametrize("""compression, extension""" , [("""gzip""", """gz"""), ("""bz2""", """bz2"""), ("""xz""", """xz""")] )
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
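
# A minimal, self-contained sketch of the round trip these tests exercise.
# Assumes only that the `datasets` library is installed; `JsonDatasetWriter`
# is the same class imported by this test module (datasets.io.json).
if __name__ == "__main__":
    import io as _io
    import json as _json

    from datasets import Dataset

    _demo = Dataset.from_dict({"id": [0, 1], "tokens": [["a"], ["b"]]})
    with _io.BytesIO() as _buf:
        JsonDatasetWriter(_demo, _buf, lines=True).write()
        _buf.seek(0)
        _rows = [_json.loads(line) for line in _buf.read().splitlines()]
    assert len(_rows) == 2 and _rows[0]["id"] == 0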
"""simple docstring"""
def binary_multiply(a: int, b: int) -> int:
    """Multiply a and b using the binary (Russian peasant) method."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Multiply a and b modulo `modulus`, keeping every intermediate small."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
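
# Quick sanity check for the two helpers above (illustrative):
if __name__ == "__main__":
    assert binary_multiply(7, 9) == 63
    assert binary_mod_multiply(7, 9, 10) == (7 * 9) % 10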
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        # fixed values deliberately override the constructor arguments, as in the upstream test
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config, "use_cache"):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)
                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]
                self.assertEqual(len(outputs), num_out)
                expected_num_layers = getattr(self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1)
                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(list(output_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size])
                # ConvBERT halves the attention heads via head_ratio, hence num_attention_heads / 2
                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(list(output_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length])
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length])

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length])

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
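
# Illustrative inference sketch (assumptions: `transformers` with TF extras
# installed and network access to the "YituTech/conv-bert-base" checkpoint
# exercised by the tests above):
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
    model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
    batch = tokenizer("ConvBERT replaces some attention heads with convolutions.", return_tensors="tf")
    hidden = model(**batch).last_hidden_state  # shape (1, seq_len, 768)
    print(hidden.shape)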
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        conv1_get: [kernel_size, num_kernels, conv_step]; size_p1: pooling size;
        bp_num1/2/3: units of the flatten, hidden and output layers;
        rate_w / rate_t: learning rates of the weights and thresholds.
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1
    def save_model(self, save_path):
        # save all model parameters with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)
        print(f"Model saved: {save_path}")
@classmethod
    def read_model(cls, model_path):
        # rebuild a CNN instance from a pickled parameter dict
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301
        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        rate_w = model_dic.get("rate_weight")
        rate_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, rate_w, rate_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins
    def sig(self, x):
        # sigmoid activation
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)
    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map
            )
            data_featuremap.append(featuremap)
        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap
    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled
    def _expand(self, data):
        # flatten a list of 2-D matrices into one 1-D vector
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        # flatten a single matrix into a (1, n) row
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded
    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        # propagate the pooled gradient back to the convolution layer
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv2)
        return pd_all
    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=False):
        # model training with simple gradient descent
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data  ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data  ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(data_train, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1)
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input
                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)
                # --------------Model Learning ------------------------
                # calculate error and gradient ---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3)))
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2)))
                pd_i_all = np.dot(pd_j_all, self.vji)
                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1, pd_conv1_pooled, shape_featuremap1[0], shape_featuremap1[1], self.size_pooling1)
                # weight and threshold learning process ---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    k_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(k_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0]))
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print('   ----Teach      ', data_teach)
                # print('   ----BP_output  ', bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complete---------------------")
        print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse
    def predict(self, datas_test):
        # forward pass over a batch of test images
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data  ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1)
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)
            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)
    def convolution(self, data):
        # expose the intermediate convolution and pooling results for one image
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1)
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1
if __name__ == "__main__":
pass
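    # Hedged usage sketch: train on random data just to exercise the class.
    # The shapes are assumptions chosen to satisfy the flatten size
    # (num_bp1 must equal num_kernels * pooled_size**2 for these 6x6 inputs).
    cnn = CNN(conv1_get=[3, 2, 1], size_p1=2, bp_num1=8, bp_num2=6, bp_num3=2, rate_w=0.2, rate_t=0.2)
    images = [np.random.rand(6, 6) for _ in range(4)]
    targets = [np.random.rand(2) for _ in range(4)]
    cnn.train(patterns=4, datas_train=images, datas_teach=targets, n_repeat=2, error_accuracy=0.1, draw_e=False)
    print(cnn.predict(images))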
"""simple docstring"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model")
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir, )
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
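
# Hedged CLI sketch (the paths are placeholders, not from the original script):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_ckpt
# Note: the script uses TF1-style sessions and variables (tf.Session,
# tf.get_variable), so it assumes TensorFlow 1.x or tf.compat.v1 behavior.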
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False)
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True)
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False)
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 ,'Cannot do xla on CPU.' )
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False)
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False)
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, eager_mode=True, multi_process=False)
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
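
# Illustrative sketch of the benchmark API exercised above (assumes a working
# TF install; the tiny checkpoint keeps the run fast):
if __name__ == "__main__":
    args = TensorFlowBenchmarkArguments(models=["sshleifer/tiny-gpt2"], inference=True, training=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
    print(TensorFlowBenchmark(args).run())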
"""simple docstring"""
import os


def solution(filename: str = "matrix.txt") -> int:
    """Return the minimal path sum from the top left to the bottom right of
    the grid, moving only right and down (Project Euler problem 81)."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()
    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for i in range(n)] for j in range(n)]
    dp[0][0] = grid[0][0]
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]
if __name__ == "__main__":
print(F"{solution() = }")
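
# Worked micro-example of the same DP (illustrative): for the 2x2 grid
# [[1, 2], [3, 4]] the table fills as dp = [[1, 3], [4, 7]], so the minimal
# right/down path sum is 7 (1 -> 2 -> 4).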
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]

    def __init__(self, feature_size: int = 1, sampling_rate: int = 24000, padding_value: float = 0.0, chunk_length_s: float = None, overlap: float = None, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))
    def __call__(self, raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], padding: Optional[Union[bool, str, PaddingStrategy]] = None, truncation: Optional[bool] = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")
        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True
        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list))))
        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]
        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")
        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values
        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values, max_length=max_length, truncation=truncation, padding=padding, return_attention_mask=padding)
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")
        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)
        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
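
# Hedged usage sketch (assumes `transformers` exposes this extractor under the
# class name restored above, as it does for EnCodec checkpoints):
if __name__ == "__main__":
    import numpy as _np

    fe = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000)
    audio = _np.zeros(24000, dtype=_np.float32)  # one second of silence
    batch = fe(audio, sampling_rate=24000, return_tensors="np")
    print(batch["input_values"].shape)  # (1, 1, 24000)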
"""simple docstring"""
import unittest
import numpy as np
def schur_complement(mat_a: np.ndarray, mat_b: np.ndarray, mat_c: np.ndarray, pseudo_inv: np.ndarray | None = None) -> np.ndarray:
    """
    Schur complement of the block matrix [[A, B], [B^T, C]] with respect to A,
    i.e. C - B^T A^{-1} B.
    """
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)
    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)
    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement.")
    return mat_c - mat_b.T @ a_inv @ mat_b
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            schur_complement(b, a, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
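
# Determinant identity verified by the first test, for reference:
#   det([[A, B], [B^T, C]]) = det(A) * det(C - B^T A^{-1} B)
# e.g. with A = [[2]], B = [[1]], C = [[3]]: det([[2, 1], [1, 3]]) = 5 and
# det(A) * det(S) = 2 * (3 - 1/2 * 1) = 5.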
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg: str, hint: str = None):
    require_version(deps[pkg], hint)
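
# Illustrative call (any key of `deps` works; the pin comes from
# dependency_versions_table.py):
#   dep_version_check("tqdm")  # raises if the installed tqdm violates the pin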
"""simple docstring"""
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup

logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)
    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child))
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")
        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []
        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue
                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue
                all_doc_strings.append(text_in_this_tag)
                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)
        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")
        return all_doc_strings, string2xtag_seq, string2xsubs_seq
    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath
    def __call__(self, html_strings) -> BatchFeature:
        # Check that strings has a valid type
        valid_strings = False
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True
        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}.")
        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]
        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)
        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)
        return encoded_inputs
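
# Hedged usage sketch (requires `beautifulsoup4`; output keys follow the
# implementation above):
if __name__ == "__main__":
    extractor = MarkupLMFeatureExtractor()
    features = extractor("<html><body><p>Hello</p><p>World</p></body></html>")
    print(features["nodes"])   # [['Hello', 'World']]
    print(features["xpaths"])  # [['/html/body/p[1]', '/html/body/p[2]']]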
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))
    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ])
    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ])
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ])
    return rename_keys
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : List[Any]=False ):
for i in range(config.num_hidden_layers ):
lowercase_ : Any = 'backbone.' if is_semantic else ''
# queries, keys and values
lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.qkv.weight''' )
lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.q_bias''' )
lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.attn.v_bias''' )
lowercase_ : List[str] = in_proj_weight[
: config.hidden_size, :
]
lowercase_ : List[str] = q_bias
lowercase_ : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase_ : Any = in_proj_weight[
-config.hidden_size :, :
]
lowercase_ : Any = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
lowercase_ : Any = state_dict.pop(F'''{prefix}blocks.{i}.gamma_1''' )
lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.gamma_2''' )
lowercase_ : Tuple = gamma_a
lowercase_ : List[Any] = gamma_a
def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ):
lowercase_ : List[Any] = dct.pop(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = val
def lowercase__( ):
lowercase_ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowercase_ : Any = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any=False ):
lowercase_ : List[str] = False if 'rvlcdip' in checkpoint_url else True
lowercase_ : Dict = BeitConfig(use_absolute_position_embeddings=__SCREAMING_SNAKE_CASE , use_mask_token=__SCREAMING_SNAKE_CASE )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
lowercase_ : Any = 10_24
lowercase_ : List[str] = 40_96
lowercase_ : Tuple = 24
lowercase_ : Union[str, Any] = 16
# labels
if "rvlcdip" in checkpoint_url:
lowercase_ : Optional[Any] = 16
lowercase_ : Any = 'huggingface/label-files'
lowercase_ : int = 'rvlcdip-id2label.json'
lowercase_ : Optional[int] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
lowercase_ : Dict = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowercase_ : str = idalabel
lowercase_ : str = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
lowercase_ : Dict = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location='cpu' )['model']
lowercase_ : Optional[Any] = create_rename_keys(__SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
read_in_q_k_v(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE )
# load HuggingFace model
lowercase_ : Optional[int] = BeitForMaskedImageModeling(__SCREAMING_SNAKE_CASE ) if has_lm_head else BeitForImageClassification(__SCREAMING_SNAKE_CASE )
model.eval()
model.load_state_dict(__SCREAMING_SNAKE_CASE )
# Check outputs on an image
lowercase_ : List[Any] = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__SCREAMING_SNAKE_CASE )
lowercase_ : str = prepare_img()
lowercase_ : Optional[Any] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' )
lowercase_ : int = encoding['pixel_values']
lowercase_ : Any = model(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = outputs.logits
# verify logits
lowercase_ : Optional[Any] = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 1_96, 81_92]
assert logits.shape == torch.Size(__SCREAMING_SNAKE_CASE ), "Shape of logits not as expected"
Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__SCREAMING_SNAKE_CASE )
if push_to_hub:
if has_lm_head:
lowercase_ : List[str] = 'dit-base' if 'base' in checkpoint_url else 'dit-large'
else:
lowercase_ : List[str] = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
image_processor.push_to_hub(
repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=__SCREAMING_SNAKE_CASE , )
model.push_to_hub(
repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=__SCREAMING_SNAKE_CASE , )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
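# Example invocation (the script filename and output path are hypothetical):
#   python convert_dit_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base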
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True,) -> None:
        '''simple docstring'''
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        '''simple docstring'''
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        '''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None
    def setUp(self):
        '''simple docstring'''
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
    def test_image_processor_from_dict_with_kwargs(self):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        '''simple docstring'''
        pass
    def test_call_pil(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),)
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),)
    def test_call_numpy(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),)
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),)
    def test_call_pytorch(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),)
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),)
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        '''simple docstring'''
        # prepare image and target
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r') as f:
            target = json.loads(f.read())
        target = {'image_id': 39769, 'annotations': target}
        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50')
        encoding = image_processing(images=image, annotations=target, return_tensors='pt')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        '''simple docstring'''
        # prepare image, target and masks_path
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r') as f:
            target = json.loads(f.read())
        target = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
        # encode them
        image_processing = ConditionalDetrImageProcessor(format='coco_panoptic')
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors='pt')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
"""simple docstring"""
encode_dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    encoded = ''
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception('encode() accepts only letters of the alphabet and spaces')
    return encoded
def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception('decode() accepts only \'A\', \'B\' and spaces')
    decoded = ''
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
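# Worked example (the values follow directly from the table above):
# encode("hello") == "AABBBAABAAABABAABABAABBAB"
# decode("AABBBAABAAABABAABABAABBAB") == "hello"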
if __name__ == "__main__":
from doctest import testmod
testmod()
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    attributes = ['image_processor', 'feature_extractor']
    image_processor_class = 'TvltImageProcessor'
    feature_extractor_class = 'TvltFeatureExtractor'
    def __init__(self, image_processor, feature_extractor):
        '''simple docstring'''
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor
    def __call__(self, images=None, audio=None, images_mixed=None, sampling_rate=None, mask_audio=False, mask_pixel=False, *args, **kwargs,):
        '''simple docstring'''
        if images is None and audio is None:
            raise ValueError('You need to specify either an `images` or `audio` input to process.')
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs)
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict
    @property
    def model_input_names(self):
        '''simple docstring'''
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
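# Usage sketch (assumes already-instantiated TVLT sub-processors; argument values are illustrative):
# processor = TvltProcessor(image_processor, feature_extractor)
# inputs = processor(images=video_frames, audio=audio_waveform, sampling_rate=44100)
# `inputs` is a dict merging the image-model and audio-model inputs.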
"""simple docstring"""
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)
    return count_of_possible_combinations(target)
def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array)
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
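    # With n=3, target=5, array=[1, 2, 5] this prints 9: the ordered combinations are
    # 1+1+1+1+1, the four arrangements of 2+1+1+1, the three arrangements of 2+2+1, and 5.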
"""simple docstring"""
import random
def _partition(data: list, pivot) -> tuple:
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater
def quick_select(items: list, index: int):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)
    # invalid input
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
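# Illustrative example: quick_select([2, 4, 5, 7, 899, 54, 32], 5) returns 54,
# the element at index 5 of the sorted list [2, 4, 5, 7, 32, 54, 899].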
"""simple docstring"""
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        '''simple docstring'''
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))
    def merge(self, src: int, dst: int) -> bool:
        '''simple docstring'''
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True
    def get_parent(self, disj_set: int) -> int:
        '''simple docstring'''
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])  # path compression
        return self.parents[disj_set]
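# Minimal usage sketch (illustrative):
# ds = DisjointSet([1, 1, 1])            # three singleton sets, each of size 1
# ds.merge(1, 2)                         # union by rank; ds.max_set becomes 2
# ds.get_parent(1) == ds.get_parent(2)   # -> True, elements 1 and 2 now share a set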
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class VanConfig(PretrainedConfig):
    model_type = 'van'
    def __init__(self, image_size=224, num_channels=3, patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3], mlp_ratios=[8, 8, 4, 4], hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-6, layer_scale_init_value=1e-2, drop_path_rate=0.0, dropout_rate=0.0, **kwargs,):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
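# Example (illustrative): the defaults reproduce the van-base layout above, so
# VanConfig() and VanConfig(hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3]) are equivalent.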
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs,):
        '''simple docstring'''
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs,)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'])
            if "cls" in state:
                state['cls'] = tuple(state['cls'])
            changes_to_apply = False
            if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets', trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        '''simple docstring'''
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)
    @mask_token.setter
    def mask_token(self, value):
        '''simple docstring'''
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation) -> List[int]:
        '''simple docstring'''
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(' ' + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)
        full_string = ' '.join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''')
        return input_ids
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
logger = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={'help': 'The label smoothing epsilon to apply (if not zero).'})
    sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to SortishSamler or not.'})
    predict_with_generate: bool = field(
        default=False, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'})
    adafactor: bool = field(default=False, metadata={'help': 'whether to use adafactor'})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'})
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'})
    dropout: Optional[float] = field(default=None, metadata={'help': 'Dropout probability. Goes into model.config.'})
    attention_dropout: Optional[float] = field(
        default=None, metadata={'help': 'Attention dropout probability. Goes into model.config.'})
    lr_scheduler: Optional[str] = field(
        default='linear', metadata={'help': F'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}'''}, )
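# Usage sketch (illustrative; ModelArguments and DataTrainingArguments are hypothetical
# companion dataclasses from the surrounding example script):
# parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
# model_args, data_args, training_args = parser.parse_args_into_dataclasses()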
"""simple docstring"""
import os
import sys
import unittest
__SCREAMING_SNAKE_CASE =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        '''simple docstring'''
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {'BertModelTest': 'BertModelTester'}
        EXPECTED_BLIP_MAPPING = {
            'BlipModelTest': 'BlipModelTester',
            'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
            'BlipTextModelTest': 'BlipTextModelTester',
            'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
            'BlipVQAModelTest': 'BlipVQAModelTester',
            'BlipVisionModelTest': 'BlipVisionModelTester',
        }
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_test_mapping(self):
        '''simple docstring'''
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {
            'BertForMaskedLM': ['BertModelTest'],
            'BertForMultipleChoice': ['BertModelTest'],
            'BertForNextSentencePrediction': ['BertModelTest'],
            'BertForPreTraining': ['BertModelTest'],
            'BertForQuestionAnswering': ['BertModelTest'],
            'BertForSequenceClassification': ['BertModelTest'],
            'BertForTokenClassification': ['BertModelTest'],
            'BertLMHeadModel': ['BertModelTest'],
            'BertModel': ['BertModelTest'],
        }
        EXPECTED_BLIP_MAPPING = {
            'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
            'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
            'BlipForQuestionAnswering': ['BlipVQAModelTest'],
            'BlipModel': ['BlipModelTest'],
            'BlipTextModel': ['BlipTextModelTest'],
            'BlipVisionModel': ['BlipVisionModelTest'],
        }
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_tester_mapping(self):
        '''simple docstring'''
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {
            'BertForMaskedLM': ['BertModelTester'],
            'BertForMultipleChoice': ['BertModelTester'],
            'BertForNextSentencePrediction': ['BertModelTester'],
            'BertForPreTraining': ['BertModelTester'],
            'BertForQuestionAnswering': ['BertModelTester'],
            'BertForSequenceClassification': ['BertModelTester'],
            'BertForTokenClassification': ['BertModelTester'],
            'BertLMHeadModel': ['BertModelTester'],
            'BertModel': ['BertModelTester'],
        }
        EXPECTED_BLIP_MAPPING = {
            'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
            'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
            'BlipForQuestionAnswering': ['BlipVQAModelTester'],
            'BlipModel': ['BlipModelTester'],
            'BlipTextModel': ['BlipTextModelTester'],
            'BlipVisionModel': ['BlipVisionModelTester'],
        }
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ['input_values', 'padding_mask']
    def __init__(self, feature_size=1, sampling_rate=24000, padding_value=0.0, chunk_length_s=None, overlap=None, **kwargs,):
        '''simple docstring'''
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
    @property
    def chunk_length(self):
        '''simple docstring'''
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)
    @property
    def chunk_stride(self):
        '''simple docstring'''
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))
    def __call__(self, raw_audio, padding=None, truncation=False, max_length=None, return_tensors=None, sampling_rate=None,) -> BatchFeature:
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
                    f''' {self.sampling_rate} and not {sampling_rate}.''')
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.')
        if padding and truncation:
            raise ValueError('Both padding and truncation were set. Make sure you only set one.')
        elif padding is None:
            # by default let's pad the inputs
            padding = True
        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list))))
        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]
        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''')
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''')
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''')
        padded_inputs = None
        input_values = BatchFeature({'input_values': raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = 'max_length'
            else:
                padded_inputs = input_values
        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values, max_length=max_length, truncation=truncation, padding=padding, return_attention_mask=padding,)
        if padding:
            padded_inputs['padding_mask'] = padded_inputs.pop('attention_mask')
        input_values = []
        for example in padded_inputs.pop('input_values'):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)
        padded_inputs['input_values'] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
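# Usage sketch (illustrative values):
# feature_extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000)
# inputs = feature_extractor(raw_audio, sampling_rate=24000, return_tensors="pt")
# inputs["input_values"].shape -> (batch, channels, padded_length)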
"""simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = F"[{hostname}-{local_rank}]"
try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()
    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)
    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
    printflock(F"{gpu} is OK (global rank: {rank}/{world_size})")
    dist.barrier()
    if rank == 0:
        printflock(F"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")
except Exception:
    printflock(F"{gpu} is broken")
    raise
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=33, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None,):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        '''simple docstring'''
        return EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range,)
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False
    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            'feature-extraction': EsmModel,
            'fill-mask': EsmForMaskedLM,
            'text-classification': EsmForSequenceClassification,
            'token-classification': EsmForTokenClassification,
            'zero-shot': EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True
    def setUp(self):
        '''simple docstring'''
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_create_position_ids_respects_padding_index(self):
        '''simple docstring'''
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)
        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ])
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
    def test_create_position_ids_from_inputs_embeds(self):
        '''simple docstring'''
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)
        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
    @unittest.skip('Esm does not support embedding resizing')
    def test_resize_embeddings_untied(self):
        '''simple docstring'''
        pass
    @unittest.skip('Esm does not support embedding resizing')
    def test_resize_tokens_embeddings(self):
        '''simple docstring'''
        pass
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        '''simple docstring'''
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        '''simple docstring'''
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D')
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]
            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)
            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]])
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_no_head(self):
        '''simple docstring'''
        with torch.no_grad():
            model = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D')
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]])
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
"""simple docstring"""
class Node :
    def __init__( self ,name ,val ) -> None:
        '''simple docstring'''
        self.name = name
        self.val = val
    def __str__( self ) -> str:
        '''simple docstring'''
        return f'''{self.__class__.__name__}({self.name}, {self.val})'''
    def __lt__( self ,other ) -> bool:
        '''simple docstring'''
        return self.val < other.val
class MinHeap :
    def __init__( self ,array ) -> None:
        '''simple docstring'''
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array )
    def __getitem__( self ,key ) -> int:
        '''simple docstring'''
        return self.get_value(key )
    def get_parent_idx( self ,idx ) -> int:
        '''simple docstring'''
        return (idx - 1) // 2
    def get_left_child_idx( self ,idx ) -> int:
        '''simple docstring'''
        return idx * 2 + 1
    def get_right_child_idx( self ,idx ) -> int:
        '''simple docstring'''
        return idx * 2 + 2
    def get_value( self ,key ) -> int:
        '''simple docstring'''
        return self.heap_dict[key]
    def build_heap( self ,array ) -> list:
        '''simple docstring'''
        last_idx = len(array ) - 1
        start_from = self.get_parent_idx(last_idx )
        for idx, i in enumerate(array ):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from ,-1 ,-1 ):
            self.sift_down(i ,array )
        return array
    def sift_down( self ,idx ,array ) -> None:
        '''simple docstring'''
        while True:
            l = self.get_left_child_idx(idx )  # noqa: E741
            r = self.get_right_child_idx(idx )
            smallest = idx
            if l < len(array ) and array[l] < array[idx]:
                smallest = l
            if r < len(array ) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx] , array[smallest] = array[smallest], array[idx]
                self.idx_of_element[array[idx]] , self.idx_of_element[array[smallest]] = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break
    def sift_up( self ,idx ) -> None:
        '''simple docstring'''
        p = self.get_parent_idx(idx )
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p] , self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]] , self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx )
    def peek( self ) -> Node:
        '''simple docstring'''
        return self.heap[0]
    def remove( self ) -> Node:
        '''simple docstring'''
        self.heap[0] , self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]] , self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0 ,self.heap )
        return x
    def insert( self ,node ) -> None:
        '''simple docstring'''
        self.heap.append(node )
        self.idx_of_element[node] = len(self.heap ) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap ) - 1 )
    def is_empty( self ) -> bool:
        '''simple docstring'''
        return len(self.heap ) == 0
    def decrease_key( self ,node ,new_value ) -> None:
        '''simple docstring'''
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node] )
__SCREAMING_SNAKE_CASE =Node("R", -1)
__SCREAMING_SNAKE_CASE =Node("B", 6)
__SCREAMING_SNAKE_CASE =Node("A", 3)
__SCREAMING_SNAKE_CASE =Node("X", 1)
__SCREAMING_SNAKE_CASE =Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
__SCREAMING_SNAKE_CASE =MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
print(i)
print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ , lowercase_ : Optional[Any] = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-canny' ,from_pt=__UpperCamelCase ,dtype=jnp.bfloat16 )
lowercase_ , lowercase_ : Optional[int] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' ,controlnet=__UpperCamelCase ,from_pt=__UpperCamelCase ,dtype=jnp.bfloat16 )
lowercase_ : Union[str, Any] = controlnet_params
lowercase_ : Tuple = 'bird'
lowercase_ : Optional[int] = jax.device_count()
lowercase_ : Dict = pipe.prepare_text_inputs([prompts] * num_samples )
lowercase_ : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
lowercase_ : List[Any] = pipe.prepare_image_inputs([canny_image] * num_samples )
lowercase_ : Tuple = jax.random.PRNGKey(0 )
lowercase_ : List[Any] = jax.random.split(__UpperCamelCase ,jax.device_count() )
lowercase_ : int = replicate(__UpperCamelCase )
lowercase_ : str = shard(__UpperCamelCase )
lowercase_ : List[Any] = shard(__UpperCamelCase )
lowercase_ : List[str] = pipe(
prompt_ids=__UpperCamelCase ,image=__UpperCamelCase ,params=__UpperCamelCase ,prng_seed=__UpperCamelCase ,num_inference_steps=50 ,jit=__UpperCamelCase ,).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
lowercase_ : Optional[int] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase_ : Tuple = images[0, 253:256, 253:256, -1]
lowercase_ : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase_ : Tuple = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ , lowercase_ : str = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-openpose' ,from_pt=__UpperCamelCase ,dtype=jnp.bfloat16 )
lowercase_ , lowercase_ : Optional[int] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' ,controlnet=__UpperCamelCase ,from_pt=__UpperCamelCase ,dtype=jnp.bfloat16 )
lowercase_ : Any = controlnet_params
lowercase_ : Dict = 'Chef in the kitchen'
lowercase_ : Optional[Any] = jax.device_count()
lowercase_ : Dict = pipe.prepare_text_inputs([prompts] * num_samples )
lowercase_ : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
lowercase_ : List[str] = pipe.prepare_image_inputs([pose_image] * num_samples )
lowercase_ : Any = jax.random.PRNGKey(0 )
lowercase_ : str = jax.random.split(__UpperCamelCase ,jax.device_count() )
lowercase_ : Optional[int] = replicate(__UpperCamelCase )
lowercase_ : Union[str, Any] = shard(__UpperCamelCase )
lowercase_ : Tuple = shard(__UpperCamelCase )
lowercase_ : Union[str, Any] = pipe(
prompt_ids=__UpperCamelCase ,image=__UpperCamelCase ,params=__UpperCamelCase ,prng_seed=__UpperCamelCase ,num_inference_steps=50 ,jit=__UpperCamelCase ,).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
lowercase_ : Optional[int] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase_ : str = images[0, 253:256, 253:256, -1]
lowercase_ : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase_ : Tuple = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
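    # Note (added for clarity): both tests above follow the standard Flax data-parallel
    # recipe: `replicate(params)` copies the weights to every device, `shard(...)` splits
    # the per-sample inputs across devices, and `jax.random.split(rng, jax.device_count())`
    # gives each device its own PRNG key before the pipeline call (run with jit=True in
    # the upstream test), which is why the output batch dimension equals the device count.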
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : List[Any] = tempfile.mkdtemp()
# fmt: off
lowercase_ : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
lowercase_ : int = dict(zip(__UpperCamelCase ,range(len(__UpperCamelCase ) ) ) )
lowercase_ : Union[str, Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
lowercase_ : Tuple = {'unk_token': '<unk>'}
lowercase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
lowercase_ : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(__UpperCamelCase ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(__UpperCamelCase ) )
lowercase_ : Any = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073],
'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
lowercase_ : List[str] = os.path.join(self.tmpdirname ,__UpperCamelCase )
with open(self.image_processor_file ,'w' ,encoding='utf-8' ) as fp:
json.dump(__UpperCamelCase ,__UpperCamelCase )
def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> str:
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname ,**__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
        lowercase_ : Dict = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uint8 )]
lowercase_ : List[str] = [Image.fromarray(np.moveaxis(__UpperCamelCase ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
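        # Note (added for clarity): the helper above builds channel-first (3, 30, 400)
        # uint8 arrays and converts them to PIL images with `np.moveaxis(x, 0, -1)`,
        # i.e. to the channel-last layout PIL expects.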
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : Optional[int] = self.get_tokenizer()
lowercase_ : List[Any] = self.get_rust_tokenizer()
lowercase_ : Tuple = self.get_image_processor()
lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowercase_ : Union[str, Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname ,use_fast=__UpperCamelCase )
lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowercase_ : str = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,__UpperCamelCase )
self.assertIsInstance(processor_fast.tokenizer ,__UpperCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,__UpperCamelCase )
self.assertIsInstance(processor_fast.image_processor ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase_ : List[Any] = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' )
lowercase_ : Any = self.get_image_processor(do_normalize=__UpperCamelCase ,padding_value=1.0 )
lowercase_ : Any = CLIPSegProcessor.from_pretrained(
self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=__UpperCamelCase ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,__UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : Dict = self.get_image_processor()
lowercase_ : List[str] = self.get_tokenizer()
lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : List[Any] = self.prepare_image_inputs()
lowercase_ : str = image_processor(__UpperCamelCase ,return_tensors='np' )
lowercase_ : Union[str, Any] = processor(images=__UpperCamelCase ,return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Dict = self.get_image_processor()
lowercase_ : List[Any] = self.get_tokenizer()
lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : Dict = 'lower newer'
lowercase_ : Any = processor(text=__UpperCamelCase )
lowercase_ : int = tokenizer(__UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : str = self.get_image_processor()
lowercase_ : str = self.get_tokenizer()
lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : List[Any] = 'lower newer'
lowercase_ : str = self.prepare_image_inputs()
lowercase_ : Optional[int] = processor(text=__UpperCamelCase ,images=__UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) ,['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(__UpperCamelCase ):
processor()
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : Tuple = self.get_image_processor()
lowercase_ : Optional[Any] = self.get_tokenizer()
lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : Optional[int] = self.prepare_image_inputs()
lowercase_ : Optional[Any] = self.prepare_image_inputs()
lowercase_ : int = processor(images=__UpperCamelCase ,visual_prompt=__UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'conditional_pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(__UpperCamelCase ):
processor()
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : List[str] = self.get_image_processor()
lowercase_ : Optional[Any] = self.get_tokenizer()
lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase_ : List[str] = processor.batch_decode(__UpperCamelCase )
lowercase_ : Optional[Any] = tokenizer.batch_decode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class UpperCamelCase ( lowercase_ ):
lowercase = 'falcon'
lowercase = ['past_key_values']
def __init__( self ,__UpperCamelCase=6_5024 ,__UpperCamelCase=4544 ,__UpperCamelCase=32 ,__UpperCamelCase=71 ,__UpperCamelCase=1e-5 ,__UpperCamelCase=0.02 ,__UpperCamelCase=True ,__UpperCamelCase=0.0 ,__UpperCamelCase=0.0 ,__UpperCamelCase=None ,__UpperCamelCase=False ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=False ,__UpperCamelCase=11 ,__UpperCamelCase=11 ,**__UpperCamelCase ,) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Dict = vocab_size
# Backward compatibility with n_embed kwarg
lowercase_ : Union[str, Any] = kwargs.pop('n_embed' ,__UpperCamelCase )
lowercase_ : Optional[Any] = hidden_size if n_embed is None else n_embed
lowercase_ : Tuple = num_hidden_layers
lowercase_ : Any = num_attention_heads
lowercase_ : str = layer_norm_epsilon
lowercase_ : Any = initializer_range
lowercase_ : List[str] = use_cache
lowercase_ : Any = hidden_dropout
lowercase_ : Optional[Any] = attention_dropout
lowercase_ : Any = bos_token_id
lowercase_ : Optional[Any] = eos_token_id
lowercase_ : Optional[Any] = num_attention_heads if num_kv_heads is None else num_kv_heads
lowercase_ : str = alibi
lowercase_ : Tuple = new_decoder_architecture
lowercase_ : Dict = multi_query # Ignored when new_decoder_architecture is True
lowercase_ : Optional[int] = parallel_attn
lowercase_ : Union[str, Any] = bias
super().__init__(bos_token_id=__UpperCamelCase ,eos_token_id=__UpperCamelCase ,**__UpperCamelCase )
@property
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
return self.hidden_size // self.num_attention_heads
@property
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
return not self.alibi
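    # Note (added for clarity): in the upstream FalconConfig the two properties above are
    # named `head_dim` and `rotary`; e.g. with the falcon-7b defaults,
    # hidden_size // num_attention_heads == 4544 // 71 == 64, and rotary position
    # embeddings are used exactly when `alibi` is False.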
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
"""simple docstring"""
import math
class SelfOrganizingMap :
    def get_winner( self ,weights ,sample ) -> int:
        '''simple docstring'''
        d_a = 0.0
        d_b = 0.0
        for i in range(len(sample ) ):
            d_a += math.pow((sample[i] - weights[0][i]) ,2 )
            d_b += math.pow((sample[i] - weights[1][i]) ,2 )
        return 0 if d_a > d_b else 1
    def update( self ,weights ,sample ,j ,alpha ) -> list[list[int | float]]:
        '''simple docstring'''
        for i in range(len(sample ) ):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def main():
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs ):
        for j in range(len(training_samples ) ):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights ,sample )
            # Update the winning vector
            weights = self_organizing_map.update(weights ,sample ,winner ,alpha )
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights ,sample )
    # results
    print(F'''Clusters that the test sample belongs to : {winner}''' )
    print(F'''Weights that have been trained : {weights}''' )
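# Note (added for clarity): `update` applies the standard competitive-learning rule
# w_j <- w_j + alpha * (x - w_j), moving only the winning cluster's weights towards
# the sample; with alpha = 0.5 each update halves the remaining distance.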
# running the main() function
if __name__ == "__main__":
main()
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=99 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=50 ,__UpperCamelCase=0.02 ,__UpperCamelCase=True ,__UpperCamelCase=None ,) -> List[str]:
'''simple docstring'''
lowercase_ : Dict = parent
lowercase_ : Tuple = batch_size
lowercase_ : List[Any] = seq_length
lowercase_ : Optional[Any] = is_training
lowercase_ : Any = use_input_mask
lowercase_ : Optional[Any] = vocab_size
lowercase_ : str = hidden_size
lowercase_ : Any = num_hidden_layers
lowercase_ : Dict = num_attention_heads
lowercase_ : Optional[int] = intermediate_size
lowercase_ : Any = hidden_act
lowercase_ : Optional[Any] = hidden_dropout_prob
lowercase_ : str = attention_probs_dropout_prob
lowercase_ : Any = max_position_embeddings
lowercase_ : Optional[Any] = initializer_range
lowercase_ : Union[str, Any] = use_labels
lowercase_ : Union[str, Any] = scope
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowercase_ : List[str] = None
if self.use_input_mask:
lowercase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
lowercase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowercase_ : Any = self.get_config()
return config, input_ids, input_mask, token_labels
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return BertGenerationConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,is_decoder=__UpperCamelCase ,initializer_range=self.initializer_range ,)
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
        lowercase_ , lowercase_ , lowercase_ , lowercase_ : str = self.prepare_config_and_inputs()
lowercase_ : int = True
lowercase_ : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Any:
'''simple docstring'''
lowercase_ : Optional[Any] = BertGenerationEncoder(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase )
lowercase_ : Optional[Any] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Optional[Any] = True
lowercase_ : str = BertGenerationEncoder(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : Union[str, Any] = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,)
lowercase_ : Dict = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,)
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> int:
'''simple docstring'''
lowercase_ : List[str] = True
lowercase_ : Union[str, Any] = True
lowercase_ : int = BertGenerationDecoder(config=__UpperCamelCase ).to(__UpperCamelCase ).eval()
# first forward pass
lowercase_ : str = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,use_cache=__UpperCamelCase ,)
lowercase_ : Dict = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
lowercase_ : Union[str, Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size )
lowercase_ : Dict = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
        # append the new tokens to next_input_ids and the attention mask
lowercase_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 )
lowercase_ : Any = torch.cat([input_mask, next_mask] ,dim=-1 )
lowercase_ : int = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0]
lowercase_ : List[Any] = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,past_key_values=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0]
# select random slice
lowercase_ : int = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
lowercase_ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase_ : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase ,__UpperCamelCase ,atol=1e-3 ) )
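        # Note (added for clarity): the check above runs the extended sequence once
        # without cache and once passing only the new tokens together with
        # `past_key_values`, then compares a random hidden-state slice: caching must
        # not change the model's outputs.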
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,*__UpperCamelCase ,) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : List[str] = BertGenerationDecoder(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : Dict = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = self.prepare_config_and_inputs()
lowercase_ : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
lowercase = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
lowercase = (BertGenerationDecoder,) if is_torch_available() else ()
lowercase = (
{'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder}
if is_torch_available()
else {}
)
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : Optional[Any] = BertGenerationEncoderTester(self )
lowercase_ : Tuple = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ , lowercase_ , lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs()
lowercase_ : Optional[int] = 'bert'
self.model_tester.create_and_check_model(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
        lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
lowercase_ : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,)
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*__UpperCamelCase )
@slow
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : int = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
self.assertIsNotNone(__UpperCamelCase )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : Tuple = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
lowercase_ : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
lowercase_ : Tuple = model(__UpperCamelCase )[0]
lowercase_ : Dict = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape ,__UpperCamelCase )
lowercase_ : str = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : str = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
lowercase_ : Dict = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
lowercase_ : Dict = model(__UpperCamelCase )[0]
lowercase_ : Optional[int] = torch.Size([1, 8, 5_0358] )
self.assertEqual(output.shape ,__UpperCamelCase )
lowercase_ : Dict = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
__SCREAMING_SNAKE_CASE ={
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
__SCREAMING_SNAKE_CASE ={
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
class UpperCamelCase ( lowercase_ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
lowercase = BartTokenizer
def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase="replace" ,__UpperCamelCase="<s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="<s>" ,__UpperCamelCase="<unk>" ,__UpperCamelCase="<pad>" ,__UpperCamelCase="<mask>" ,__UpperCamelCase=False ,__UpperCamelCase=True ,**__UpperCamelCase ,) -> int:
'''simple docstring'''
super().__init__(
__UpperCamelCase ,__UpperCamelCase ,tokenizer_file=__UpperCamelCase ,errors=__UpperCamelCase ,bos_token=__UpperCamelCase ,eos_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,unk_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,add_prefix_space=__UpperCamelCase ,trim_offsets=__UpperCamelCase ,**__UpperCamelCase ,)
lowercase_ : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space:
lowercase_ : str = getattr(__UpperCamelCase ,pre_tok_state.pop('type' ) )
lowercase_ : Optional[Any] = add_prefix_space
lowercase_ : str = pre_tok_class(**__UpperCamelCase )
lowercase_ : str = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase_ : int = 'post_processor'
lowercase_ : Any = getattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase )
if tokenizer_component_instance:
lowercase_ : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
lowercase_ : str = tuple(state['sep'] )
if "cls" in state:
lowercase_ : List[str] = tuple(state['cls'] )
lowercase_ : Optional[Any] = False
if state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space:
lowercase_ : List[str] = add_prefix_space
lowercase_ : List[Any] = True
if state.get('trim_offsets' ,__UpperCamelCase ) != trim_offsets:
lowercase_ : List[str] = trim_offsets
lowercase_ : List[str] = True
if changes_to_apply:
lowercase_ : str = getattr(__UpperCamelCase ,state.pop('type' ) )
lowercase_ : Union[str, Any] = component_class(**__UpperCamelCase )
setattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase )
@property
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
'''simple docstring'''
lowercase_ : Tuple = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else value
lowercase_ : List[Any] = value
def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ : int = kwargs.get('is_split_into_words' ,__UpperCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*__UpperCamelCase ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ : List[str] = kwargs.get('is_split_into_words' ,__UpperCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._encode_plus(*__UpperCamelCase ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
lowercase_ : Optional[Any] = self._tokenizer.model.save(__UpperCamelCase ,name=__UpperCamelCase )
return tuple(__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=None ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : int = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
lowercase_ : Tuple = [self.sep_token_id]
lowercase_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
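    # Note (added for clarity): like RoBERTa, BART does not use token type ids, so the
    # method above returns a list of zeros covering `<s> A </s>` (or
    # `<s> A </s></s> B </s>` for pairs); the zeros only convey the total length of the
    # special-token layout.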
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class UpperCamelCase :
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int:
'''simple docstring'''
return None
class UpperCamelCase :
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str:
'''simple docstring'''
return None
class UpperCamelCase ( unittest.TestCase ):
lowercase = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
from transformers import BertModel
lowercase_ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(__UpperCamelCase ) )
vocab_file.flush()
lowercase_ : List[str] = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowercase_ : Optional[Any] = BertModel(BertConfig(vocab_size=len(__UpperCamelCase ) ) )
model.save_pretrained(__UpperCamelCase )
self._test_export(__UpperCamelCase ,'pt' ,12 ,__UpperCamelCase )
@require_tf
@slow
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase_ : Optional[int] = self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase )
lowercase_ : int = quantize(Path(__UpperCamelCase ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase_ : Tuple = self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase )
lowercase_ : Tuple = quantize(__UpperCamelCase )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowercase_ : Dict = Path(__UpperCamelCase ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase )
return path
except Exception as e:
self.fail(__UpperCamelCase )
@require_torch
@require_tokenizers
@slow
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
from transformers import BertModel
lowercase_ : List[Any] = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowercase_ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'pt' )
@require_tf
@require_tokenizers
@slow
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
from transformers import TFBertModel
lowercase_ : Optional[Any] = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowercase_ : Any = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'tf' )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
lowercase_ : Tuple = FeatureExtractionPipeline(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : Dict = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Any = infer_shapes(__UpperCamelCase ,__UpperCamelCase )
# Assert all variables are present
self.assertEqual(len(__UpperCamelCase ) ,len(__UpperCamelCase ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] ,__UpperCamelCase )
self.assertSequenceEqual(variable_names[3:] ,__UpperCamelCase )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] ,{0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'] ,{0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'] ,{0: 'batch'} )
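        # Note (added for clarity): marking axis 0 as `batch` and axis 1 as `sequence`
        # declares those dimensions dynamic for ONNX export, so one exported graph can
        # serve any batch size and sequence length.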
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Any = ['input_ids', 'attention_mask', 'token_type_ids']
lowercase_ : List[Any] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
lowercase_ , lowercase_ : int = ensure_valid_input(FuncContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__UpperCamelCase ) ,3 )
# Should have exactly the same input names
self.assertEqual(set(__UpperCamelCase ) ,set(__UpperCamelCase ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__UpperCamelCase ,(tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowercase_ , lowercase_ : Optional[int] = ensure_valid_input(FuncNonContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(__UpperCamelCase ) ,1 )
self.assertEqual(len(__UpperCamelCase ) ,1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] ,tokens['input_ids'] )
self.assertEqual(ordered_input_names[0] ,'input_ids' )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Dict = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) ,'-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx' ,generated.as_posix() )
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class UpperCamelCase ( lowercase_ ):
lowercase = 'luke'
def __init__( self ,__UpperCamelCase=5_0267 ,__UpperCamelCase=50_0000 ,__UpperCamelCase=768 ,__UpperCamelCase=256 ,__UpperCamelCase=12 ,__UpperCamelCase=12 ,__UpperCamelCase=3072 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=512 ,__UpperCamelCase=2 ,__UpperCamelCase=0.02 ,__UpperCamelCase=1e-12 ,__UpperCamelCase=True ,__UpperCamelCase=None ,__UpperCamelCase=1 ,__UpperCamelCase=0 ,__UpperCamelCase=2 ,**__UpperCamelCase ,) -> Union[str, Any]:
'''simple docstring'''
super().__init__(pad_token_id=__UpperCamelCase ,bos_token_id=__UpperCamelCase ,eos_token_id=__UpperCamelCase ,**__UpperCamelCase )
lowercase_ : Optional[int] = vocab_size
lowercase_ : Dict = entity_vocab_size
lowercase_ : int = hidden_size
lowercase_ : Tuple = entity_emb_size
lowercase_ : str = num_hidden_layers
lowercase_ : Any = num_attention_heads
lowercase_ : Dict = hidden_act
lowercase_ : Dict = intermediate_size
lowercase_ : Optional[Any] = hidden_dropout_prob
lowercase_ : List[str] = attention_probs_dropout_prob
lowercase_ : List[Any] = max_position_embeddings
lowercase_ : Dict = type_vocab_size
lowercase_ : List[str] = initializer_range
lowercase_ : int = layer_norm_eps
lowercase_ : Tuple = use_entity_aware_attention
lowercase_ : List[str] = classifier_dropout
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Union[str, Any] = [[1, 2, 4], [1, 2, 3, 4]]
lowercase_ : List[Any] = DisjunctiveConstraint(__UpperCamelCase )
self.assertTrue(isinstance(dc.token_ids ,__UpperCamelCase ) )
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[Any] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint(__UpperCamelCase ) # fails here
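        # Note (added for clarity): [[1, 2], [1, 2, 3, 4]] is rejected because one
        # candidate sequence is a strict prefix of another, which would make the
        # disjunctive constraint ambiguous to complete.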
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]]
lowercase_ : Dict = DisjunctiveConstraint(__UpperCamelCase )
lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = dc.update(1 )
lowercase_ : str = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowercase_ , lowercase_ , lowercase_ : Optional[Any] = dc.update(2 )
lowercase_ : Any = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowercase_ , lowercase_ , lowercase_ : Tuple = dc.update(3 )
lowercase_ : Union[str, Any] = stepped is True and completed is True and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[str] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
lowercase_ : Union[str, Any] = DisjunctiveConstraint(__UpperCamelCase )
lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowercase_ , lowercase_ , lowercase_ : str = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
lowercase_ , lowercase_ , lowercase_ : List[str] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
lowercase_ , lowercase_ , lowercase_ : Dict = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char( cp ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0x4E00 and cp <= 0x9FFF)
or (cp >= 0x3400 and cp <= 0x4DBF) #
or (cp >= 0x2_0000 and cp <= 0x2_A6DF) #
or (cp >= 0x2_A700 and cp <= 0x2_B73F) #
or (cp >= 0x2_B740 and cp <= 0x2_B81F) #
or (cp >= 0x2_B820 and cp <= 0x2_CEAF) #
or (cp >= 0xF900 and cp <= 0xFAFF)
or (cp >= 0x2_F800 and cp <= 0x2_FA1F) #
): #
return True
return False
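# Note (added for clarity): for example, ord("中") == 0x4E2D falls inside the first
# range above (0x4E00-0x9FFF), so it is classified as a CJK character, while Hangul
# and kana code points fall outside these blocks and return False.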
def is_chinese( word ):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char )
        if not _is_chinese_char(cp ):
            return 0
    return 1
def get_chinese_word( tokens ):
    word_set = set()
    for token in tokens:
        chinese_word = len(token ) > 1 and is_chinese(token )
        if chinese_word:
            word_set.add(token )
    word_list = list(word_set )
    return word_list
def add_sub_symbol( bert_tokens : List[str] ,chinese_word_set : set() ):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w ) for w in chinese_word_set] )
    bert_word = bert_tokens
    start , end = 0, len(bert_word )
    while start < end:
        single_word = True
        if is_chinese(bert_word[start] ):
            l = min(end - start ,max_word_len )  # noqa: E741
            for i in range(l ,1 ,-1 ):
                whole_word = ''.join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 ,start + i ):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref( lines : List[str] ,ltp_tokenizer : LTP ,bert_tokenizer : BertTokenizer ):
    ltp_res = []
    for i in range(0 ,len(lines ) ,1_00 ):
        res = ltp_tokenizer.pipeline(lines[i : i + 1_00] ,tasks=['cws'] ).cws
        res = [get_chinese_word(r ) for r in res]
        ltp_res.extend(res )
    assert len(ltp_res ) == len(lines )
    bert_res = []
    for i in range(0 ,len(lines ) ,1_00 ):
        res = bert_tokenizer(lines[i : i + 1_00] ,add_special_tokens=True ,truncation=True ,max_length=5_12 )
        bert_res.extend(res['input_ids'] )
    assert len(bert_res ) == len(lines )
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res ,ltp_res ):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id )
            input_tokens.append(token )
        input_tokens = add_sub_symbol(input_tokens ,chinese_word )
        ref_id = []
        # We only save the positions of Chinese subwords that start with ##, which means the subword is part of a whole word.
        for i, token in enumerate(input_tokens ):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token ) == 1 and _is_chinese_char(ord(clean_token ) ):
                    ref_id.append(i )
        ref_ids.append(ref_id )
    assert len(ref_ids ) == len(bert_res )
    return ref_ids
def main(args):
    # For Chinese (Ro)BERT models, the best results come from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm).
    # To fine-tune these models, we have to use the same word segmenter: LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster on a GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
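
# Editor's note: each line of `args.save_path` is a JSON list holding the
# positions of "##"-prefixed Chinese sub-tokens, one list per input line.
# Whole-word-masking training scripts (e.g. run_mlm_wwm.py -- an assumption,
# not stated in this file) consume it as the `chinese_ref` input.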
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        required=False,
        type=str,
        default="./resources/chinese-demo.txt",
        help="file to process; same format as the LM training data",
    )
    parser.add_argument(
        "--ltp",
        required=False,
        type=str,
        default="./resources/ltp",
        help="resources for the LTP tokenizer, usually a path",
    )
    parser.add_argument(
        "--bert",
        required=False,
        type=str,
        default="./resources/robert",
        help="resources for the BERT tokenizer",
    )
    parser.add_argument(
        "--save_path",
        required=False,
        type=str,
        default="./resources/ref.txt",
        help="path to save the result",
    )
    args = parser.parse_args()
    main(args)
| 321
|
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)
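
    # Editor's note: TF Keras Dense kernels are stored as (in_features, out_features),
    # while torch.nn.Linear stores (out_features, in_features); that is why every
    # "kernel" array loaded by the helpers above is transposed.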
    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")
        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")

        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")
    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")

    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")

    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
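
# Example invocation (editor's sketch; the script file name and paths are placeholders):
#   python convert_token_dropping_bert_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./tf2_ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./converted_model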
| 321
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True

        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))
    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 321
|
"""simple docstring"""
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
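# Editor's caveat: the XPath simply grabs the three "maincounter-number" spans
# in page order, which Worldometers renders as cases, deaths, recovered; any
# change to the page markup will silently reorder or break these fields.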
| 321
| 1
|
"""simple docstring"""
import datasets
__SCREAMING_SNAKE_CASE ="\\n@InProceedings{conneau2018xnli,\n author = \"Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin\",\n title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",\n booktitle = \"Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n location = \"Brussels, Belgium\",\n}\n"
__SCREAMING_SNAKE_CASE ="\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n"
__SCREAMING_SNAKE_CASE ="\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n 'accuracy': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric(\"xnli\")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n"
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 321
|
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 321
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 321
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True
    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_create_position_ids_respects_padding_index(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
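
    # Editor's note: ESM reuses RoBERTa-style position ids -- real tokens count
    # upward from padding_idx + 1 while every pad position is pinned to
    # padding_idx itself, which is exactly what these two tests exercise.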
    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33

            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()

            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 321
| 1
|
"""simple docstring"""
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
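# Editor's note: `128 + level + (c - 128)` reduces algebraically to `c + level`;
# Image.point then clips each looked-up value to the valid 0-255 range, which is
# why out-of-range results still produce a valid 8-bit image.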
| 321
|
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [a, c, d] -- size, number and step of the convolution kernels
        :param size_p1: pooling size
        :param bp_num1: units in the flatten layer
        :param bp_num2: units in the hidden layer
        :param bp_num3: units in the output layer
        :param rate_w: learning rate for weights
        :param rate_t: learning rate for thresholds
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1
    def save_model(self, save_path):
        # save the model parameters as a pickled dict
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)

        print(f"Model saved: {save_path}")
    @classmethod
    def read_model(cls, model_path):
        # restore a model saved by save_model
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        r_w = model_dic.get("rate_weight")
        r_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
        # modify model parameters
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins
    def sig(self, x):
        # sigmoid activation
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)
    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slices of the original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, saved as a list of matrices
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map
            )
            data_featuremap.append(featuremap)

        # expand the data slices to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap
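
    # Editor's note: `convolute` thus returns both the flattened im2col-style
    # window matrix (`focus_list`, reused later for the weight gradient) and the
    # sigmoid-activated feature maps, one per kernel.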
    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled
    def _expand(self, data):
        # expand three-dimensional data to a one-dimensional array
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        # expand a matrix to a one-dimensional array
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded
    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        # calculate the gradient of the convolution layer from the pooled gradient
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv2)
        return pd_all
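
    # Editor's note: the pooled gradient is broadcast back over each
    # size_pooling x size_pooling window (an un-pooling step), then scaled by
    # the sigmoid derivative out * (1 - out) of the convolution outputs.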
    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=True):
        # model training
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data  ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data  ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input

                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)

                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1, pd_conv1_pooled, shape_featuremap1[0], shape_featuremap1[1], self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)

                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # fully connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the summed error over all single images
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print('   ----Teach      ',data_teach)
                # print('   ----BP_output  ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complete---------------------")
        print((" - - Training epoch: ", rp, f"  - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse
    def predict(self, datas_test):
        # model prediction
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data  ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)
    def convolution(self, data):
        # return the image data after the convolution process, so we can inspect it
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1
if __name__ == "__main__":
pass
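
# Minimal usage sketch (editor's addition; all shapes here are assumptions):
#   cnn = CNN([3, 2, 1], 2, 32, 16, 4)  # 3x3 kernels, 2 maps, stride 1, 2x2 pooling
#   mse = cnn.train(patterns=1, datas_train=[np.random.rand(10, 10)],
#                   datas_teach=[np.random.rand(4)], n_repeat=10,
#                   error_accuracy=0.1, draw_e=False)
# A 10x10 input with a 3x3 kernel at stride 1 gives 8x8 maps; 2x2 pooling makes
# them 4x4, and 2 maps flatten to the 32 units assumed for the first BP layer.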
| 321
| 1
|
"""simple docstring"""
def factorial(num: int) -> int:
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
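
# Editor's worked example: factorial(10) = 3628800, whose digits sum to
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so solution(10) == 27.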
| 321
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Any = 'sshleifer/tiny-gpt2'
lowercase_ : Any = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ,[config] )
lowercase_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : int = 'sshleifer/tiny-gpt2'
lowercase_ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : List[str] = 'sshleifer/tiny-gpt2'
lowercase_ : Optional[int] = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] )
lowercase_ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : str = 'patrickvonplaten/t5-tiny-random'
lowercase_ : int = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ,configs=[config] )
lowercase_ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 ,'Cannot do xla on CPU.' )
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Optional[int] = 'sshleifer/tiny-gpt2'
lowercase_ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : List[str] = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=__UpperCamelCase ,save_to_csv=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(__UpperCamelCase ,'inf_time.csv' ) ,inference_memory_csv_file=os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ,env_info_csv_file=os.path.join(__UpperCamelCase ,'env.csv' ) ,multi_process=__UpperCamelCase ,)
lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'env.csv' ) ).exists() )
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : int = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(__UpperCamelCase ):
self.assertTrue(hasattr(__UpperCamelCase ,'sequential' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'cumulative' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'current' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(__UpperCamelCase ,'log.txt' ) ,log_print=__UpperCamelCase ,trace_memory_line_by_line=__UpperCamelCase ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : Dict = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Any = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'log.txt' ) ).exists() )
| 321
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model from the JSON config
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)
    # Load weights from the TF checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)
    # Save the PyTorch model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
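    # Hypothetical invocation (added sketch; the paths are placeholders, not real files):
    #   python convert_rembert_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./rembert/model.ckpt \
    #       --rembert_config_file ./rembert/config.json \
    #       --pytorch_dump_path ./rembert/pytorch_model.bin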
| 321
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]

    def __init__(self, feature_size=1, sampling_rate=24000, padding_value=0.0, chunk_length_s=None, overlap=None, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    @property
    def chunk_length(self):
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self):
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(self, raw_audio, padding=None, truncation=False, max_length=None, return_tensors=None, sampling_rate=None) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True
        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]
        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")
        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values
        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values, max_length=max_length, truncation=truncation, padding=padding, return_attention_mask=padding
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")
        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)
        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
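# Minimal usage sketch (added for illustration; assumes a 1-second mono clip of zeros):
#   import numpy as np
#   extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000)
#   audio = np.zeros(24000, dtype=np.float32)
#   features = extractor(raw_audio=audio, sampling_rate=24000, return_tensors="np")
#   features["input_values"][0].shape  # (1, 24000): (channels, length)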
| 321
| 1
|
"""simple docstring"""
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())
    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )
    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )
            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )
            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)


def debug_launcher(function, args=(), num_processes=2):
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
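# Illustrative notebook usage (added sketch; `training_loop` is a placeholder you would define):
#   def training_loop(learning_rate):
#       ...  # build Accelerator, model, and training loop here
#   notebook_launcher(training_loop, args=(1e-4,), num_processes=2)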
| 321
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 321
| 1
|
"""simple docstring"""
def net_present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """Return the net present value of ``cash_flows`` at the given ``discount_rate``."""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
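# Worked example (added sketch): an initial outlay of 100 followed by inflows of 50 and 60,
# discounted at 10%, gives -100 + 50/1.1 + 60/1.21 ~= -4.96, i.e. a slightly negative NPV.
#   net_present_value(0.10, [-100, 50, 60])  # -4.96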
if __name__ == "__main__":
import doctest
doctest.testmod()
| 321
|
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__SCREAMING_SNAKE_CASE ="python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
| 321
| 1
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}


class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 321
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(F'''{prefix}cls_token''', 'beit.embeddings.cls_token'),
(F'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'),
(F'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'),
(F'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('mask_token', 'beit.embeddings.mask_token'),
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('fc_norm.weight', 'beit.pooler.layernorm.weight'),
('fc_norm.bias', 'beit.pooler.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()
    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 321
| 1
|
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None
if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS: List[fsspec.AbstractFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. ``s3://``) from a dataset path, if present."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Return True if ``fs`` is anything other than the local filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear the fsspec event-loop references and thread lock so forked processes start clean."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
| 321
|
"""simple docstring"""
encode_dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encode ``word`` with the Baconian substitution above (letters and spaces only)."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decode a Baconian cipher string made of 'A', 'B' and spaces."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
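    # Round-trip example (added sketch): each letter maps to a 5-character A/B group.
    #   encode("hello")                       -> 'AABBBAABAAABABAABABAABBAB'
    #   decode("AABBBAABAAABABAABABAABBAB")   -> 'hello'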
| 321
| 1
|
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1


@dataclass
class A:
    x: int
    y: str


class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)
        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)
        sn1 = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn1_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn1_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False, num_proc=num_proc), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn1, num_proc=num_proc)
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)

    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
'iterable_length, num_proc, expected_num_proc' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
        assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_temp_seed_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_temp_seed_pytorch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_temp_seed_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize('input_data' , [{}] )
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
'data, expected_output' , [
({}, []),
([], []),
('foo', ['foo']),
(['foo', 'bar'], ['foo', 'bar']),
([['foo', 'bar']], ['foo', 'bar']),
([[['foo'], ['bar']]], ['foo', 'bar']),
([[['foo'], 'bar']], ['foo', 'bar']),
({'a': 1, 'b': 2}, [1, 2]),
({'a': [1, 2], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[[3], [4]]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, [4]]}, [1, 2, 3, 4]),
({'a': {'1': 1}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output


def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])


def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)


def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
| 321
|
"""simple docstring"""
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations of elements of ``array`` that sum to ``target`` (plain recursion)."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count, memoized top-down with a dp array."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Same count, iterative bottom-up dynamic programming."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
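    # Expected output (added note): 9. The ordered combinations of 1, 2, 5 summing to 5 are
    # (5), the 3 orderings of (1, 2, 2), the 4 orderings of (1, 1, 1, 2), and (1, 1, 1, 1, 1).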
| 321
| 1
|
"""simple docstring"""
from math import ceil
def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Evenly split ``n_layers`` layer indices across ``devices``."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
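# Example (added sketch): six layers split over two devices.
#   get_device_map(6, [0, 1])  # {0: [0, 1, 2], 1: [3, 4, 5]}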
| 321
|
"""simple docstring"""
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """Initialize with ``set_counts``, the initial size of each disjoint set."""
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge the sets containing ``src`` and ``dst``; return False if already merged."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the root of ``disj_set`` with path compression."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
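# Usage sketch (added): three sets of sizes 1, 1, 2; merging 0 and 1 makes the largest
# set size 2, then merging that set with 2 makes it 4.
#   ds = DisjointSet([1, 1, 2])
#   ds.merge(0, 1); ds.merge(1, 2)
#   ds.max_set  # 4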
| 321
| 1
|
"""simple docstring"""
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """Return the first ``nth_term`` terms of the p-series 1 + 1/2^p + 1/3^p + ... as strings."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, power)}" if series else "1")
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
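    # Example (added note): p_series(5, 2) -> ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']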
| 321
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
    def mask_token( self ,value ) -> None:
        '''simple docstring'''
        value = AddedToken(value ,lstrip=True ,rstrip=False ) if isinstance(value ,str ) else value
        self._mask_token = value
    def _batch_encode_plus( self ,*args ,**kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words' ,False )
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args ,**kwargs )
    def _encode_plus( self ,*args ,**kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words' ,False )
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args ,**kwargs )
    def save_vocabulary( self ,save_directory ,filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
    def create_token_type_ids_from_sequences( self ,token_ids_a ,token_ids_b = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def build_inputs_with_special_tokens( self ,token_ids_a ,token_ids_b = None ) -> List[int]:
        '''simple docstring'''
        return token_ids_a + [self.eos_token_id]
    def _build_conversation_input_ids( self ,conversation ) -> List[int]:
        '''simple docstring'''
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to prefix a space, as is done inside Blenderbot itself
                inputs.append(' ' + text )
            else:
                # Generated responses already contain the leading space.
                inputs.append(text )
        full_string = ' '.join(inputs )
        input_ids = self.encode(full_string )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
        return input_ids
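# Usage sketch for the fast tokenizer above (hedged: this assumes the class is
# exported as BlenderbotTokenizerFast and that the 3B checkpoint is reachable):
#
#   from transformers import BlenderbotTokenizerFast
#   tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#   ids = tok("Hello there!").input_ids
#   assert ids[-1] == tok.eos_token_id  # build_inputs_with_special_tokens only appends </s>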
| 321
| 1
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class UpperCamelCase ( PreTrainedTokenizer ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = 'left'
    def __init__( self ,vocab_file ,do_lower_case=False ,remove_space=True ,keep_accents=False ,bos_token="<s>" ,eos_token="</s>" ,unk_token="<unk>" ,sep_token="<sep>" ,pad_token="<pad>" ,cls_token="<cls>" ,mask_token="<mask>" ,additional_special_tokens=["<eop>", "<eod>"] ,sp_model_kwargs = None ,**kwargs ,) -> None:
        '''simple docstring'''
        mask_token = AddedToken(mask_token ,lstrip=True ,rstrip=False ) if isinstance(mask_token ,str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case ,remove_space=remove_space ,keep_accents=keep_accents ,bos_token=bos_token ,eos_token=eos_token ,unk_token=unk_token ,sep_token=sep_token ,pad_token=pad_token ,cls_token=cls_token ,mask_token=mask_token ,additional_special_tokens=additional_special_tokens ,sp_model_kwargs=self.sp_model_kwargs ,**kwargs ,)
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
@property
    def vocab_size( self ) -> int:
'''simple docstring'''
return len(self.sp_model )
    def get_vocab( self ) -> Dict[str, int]:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> Dict:
        '''simple docstring'''
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__( self ,d ) -> None:
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self ,'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self ,inputs ) -> str:
        '''simple docstring'''
        if self.remove_space:
            outputs = ' '.join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace('``' ,'"' ).replace('\'\'' ,'"' )
        if not self.keep_accents:
            outputs = unicodedata.normalize('NFKD' ,outputs )
            outputs = ''.join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self ,text ) -> List[str]:
        '''simple docstring'''
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text ,out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == ',' and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE ,'' ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
return self.sp_model.PieceToId(__UpperCamelCase )
    def _convert_id_to_token( self ,__UpperCamelCase ) -> str:
'''simple docstring'''
return self.sp_model.IdToPiece(__UpperCamelCase )
    def convert_tokens_to_string( self ,tokens ) -> str:
        '''simple docstring'''
        out_string = ''.join(tokens ).replace(SPIECE_UNDERLINE ,' ' ).strip()
        return out_string
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = False ,__UpperCamelCase = None ,__UpperCamelCase = True ,**__UpperCamelCase ,) -> str:
'''simple docstring'''
lowercase_ : List[Any] = kwargs.pop('use_source_tokenizer' ,__UpperCamelCase )
lowercase_ : str = self.convert_ids_to_tokens(__UpperCamelCase ,skip_special_tokens=__UpperCamelCase )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
lowercase_ : Tuple = []
lowercase_ : Tuple = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__UpperCamelCase ) )
lowercase_ : List[str] = []
sub_texts.append(__UpperCamelCase )
else:
current_sub_text.append(__UpperCamelCase )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__UpperCamelCase ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
lowercase_ : int = ''.join(__UpperCamelCase )
lowercase_ : Dict = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowercase_ : Optional[int] = self.clean_up_tokenization(__UpperCamelCase )
return clean_text
else:
return text
    def build_inputs_with_special_tokens( self ,token_ids_a ,token_ids_b = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_b + sep + cls
    def get_special_tokens_mask( self ,token_ids_a ,token_ids_b = None ,already_has_special_tokens = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a ,token_ids_1=token_ids_b ,already_has_special_tokens=True )
        if token_ids_b is not None:
            return ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1, 1]
        return ([0] * len(token_ids_a )) + [1, 1]
    def create_token_type_ids_from_sequences( self ,token_ids_a ,token_ids_b = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_b is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1] + cls_segment_id
    def save_vocabulary( self ,save_directory ,filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file ,out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file ,'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
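# Worked example of the XLNet special-token layout implemented above: unlike
# BERT, the <cls> token goes at the END of the sequence, and the trailing
# <cls> gets its own segment id of 2 (standalone sketch using plain ints):
token_ids_a, token_ids_b, sep, cls = [11, 12], [13], [4], [3]
assert token_ids_a + sep + token_ids_b + sep + cls == [11, 12, 4, 13, 4, 3]
segment_ids = len(token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1] + [2]
assert segment_ids == [0, 0, 0, 1, 1, 2]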
| 321
|
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Tuple = get_test_to_tester_mapping(__UpperCamelCase )
lowercase_ : Optional[int] = get_test_to_tester_mapping(__UpperCamelCase )
lowercase_ : List[str] = {'BertModelTest': 'BertModelTester'}
lowercase_ : Union[str, Any] = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Optional[Any] = get_model_to_test_mapping(__UpperCamelCase )
lowercase_ : List[str] = get_model_to_test_mapping(__UpperCamelCase )
lowercase_ : Any = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
lowercase_ : Any = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[str] = get_model_to_tester_mapping(__UpperCamelCase )
lowercase_ : Dict = get_model_to_tester_mapping(__UpperCamelCase )
lowercase_ : Tuple = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
lowercase_ : Optional[Any] = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
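# A minimal sketch of how a test-to-tester mapping like the ones asserted above
# could be recovered by introspection; get_test_info's real implementation may
# differ, and the helper below is illustrative only:
import inspect

def guess_tester_name(test_class ):
    # Convention in the transformers test suite: FooModelTest wires up a
    # FooModelTester in setUp, so the tester name can usually be read straight
    # out of that method's source.
    source = inspect.getsource(test_class.setUp )
    for token in source.replace('(' ,' ' ).split():
        if token.endswith('ModelTester' ):
            return token
    raise ValueError(f'''no tester found for {test_class.__name__}''' )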
| 321
| 1
|
"""simple docstring"""
import argparse
import struct
import unittest
class SHAaaa :
    def __init__( self ,data ) -> None:
        '''simple docstring'''
        self.data = data
# Initialize hash values
        self.hashes = [
0X6_A_0_9_E_6_6_7,
0XB_B_6_7_A_E_8_5,
0X3_C_6_E_F_3_7_2,
0XA_5_4_F_F_5_3_A,
0X5_1_0_E_5_2_7_F,
0X9_B_0_5_6_8_8_C,
0X1_F_8_3_D_9_A_B,
0X5_B_E_0_C_D_1_9,
]
# Initialize round constants
        self.round_constants = [
0X4_2_8_A_2_F_9_8,
0X7_1_3_7_4_4_9_1,
0XB_5_C_0_F_B_C_F,
0XE_9_B_5_D_B_A_5,
0X3_9_5_6_C_2_5_B,
0X5_9_F_1_1_1_F_1,
0X9_2_3_F_8_2_A_4,
0XA_B_1_C_5_E_D_5,
0XD_8_0_7_A_A_9_8,
0X1_2_8_3_5_B_0_1,
0X2_4_3_1_8_5_B_E,
0X5_5_0_C_7_D_C_3,
0X7_2_B_E_5_D_7_4,
0X8_0_D_E_B_1_F_E,
0X9_B_D_C_0_6_A_7,
0XC_1_9_B_F_1_7_4,
0XE_4_9_B_6_9_C_1,
0XE_F_B_E_4_7_8_6,
0X0_F_C_1_9_D_C_6,
0X2_4_0_C_A_1_C_C,
0X2_D_E_9_2_C_6_F,
0X4_A_7_4_8_4_A_A,
0X5_C_B_0_A_9_D_C,
0X7_6_F_9_8_8_D_A,
0X9_8_3_E_5_1_5_2,
0XA_8_3_1_C_6_6_D,
0XB_0_0_3_2_7_C_8,
0XB_F_5_9_7_F_C_7,
0XC_6_E_0_0_B_F_3,
0XD_5_A_7_9_1_4_7,
0X0_6_C_A_6_3_5_1,
0X1_4_2_9_2_9_6_7,
0X2_7_B_7_0_A_8_5,
0X2_E_1_B_2_1_3_8,
0X4_D_2_C_6_D_F_C,
0X5_3_3_8_0_D_1_3,
0X6_5_0_A_7_3_5_4,
0X7_6_6_A_0_A_B_B,
0X8_1_C_2_C_9_2_E,
0X9_2_7_2_2_C_8_5,
0XA_2_B_F_E_8_A_1,
0XA_8_1_A_6_6_4_B,
0XC_2_4_B_8_B_7_0,
0XC_7_6_C_5_1_A_3,
0XD_1_9_2_E_8_1_9,
0XD_6_9_9_0_6_2_4,
0XF_4_0_E_3_5_8_5,
0X1_0_6_A_A_0_7_0,
0X1_9_A_4_C_1_1_6,
0X1_E_3_7_6_C_0_8,
0X2_7_4_8_7_7_4_C,
0X3_4_B_0_B_C_B_5,
0X3_9_1_C_0_C_B_3,
0X4_E_D_8_A_A_4_A,
0X5_B_9_C_C_A_4_F,
0X6_8_2_E_6_F_F_3,
0X7_4_8_F_8_2_E_E,
0X7_8_A_5_6_3_6_F,
0X8_4_C_8_7_8_1_4,
0X8_C_C_7_0_2_0_8,
0X9_0_B_E_F_F_F_A,
0XA_4_5_0_6_C_E_B,
0XB_E_F_9_A_3_F_7,
0XC_6_7_1_7_8_F_2,
]
        self.preprocessed_data = self.preprocessing(self.data )
self.final_hash()
@staticmethod
    def preprocessing( data ) -> bytes:
        '''simple docstring'''
        padding = B'\x80' + (B'\x00' * (63 - (len(data ) + 8) % 64))
        big_endian_integer = struct.pack('>Q' ,(len(data ) * 8) )
        return data + padding + big_endian_integer
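    # Worked check of the padding arithmetic above: after the 0x80 byte, the
    # zero padding, and the 8-byte big-endian length are appended, the total
    # message length is always a multiple of 64 bytes (the SHA-256 block size):
    #
    #   for n in (0, 55, 56, 63, 64, 100):
    #       assert (n + 1 + (63 - (n + 8) % 64) + 8) % 64 == 0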
    def final_hash( self ) -> None:
        '''simple docstring'''
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0 ,len(self.preprocessed_data ) ,64 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack('>16L' ,block ) )
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0 ,64 ):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15] ,7 )
                        ^ self.ror(words[index - 15] ,18 )
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2] ,17 )
                        ^ self.ror(words[index - 2] ,19 )
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0X1_0_0_0_0_0_0_0_0
                # Compression
                s1 = self.ror(e ,6 ) ^ self.ror(e ,11 ) ^ self.ror(e ,25 )
                ch = (e & f) ^ ((~e & 0XF_F_F_F_F_F_F_F) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0X1_0_0_0_0_0_0_0_0
                s0 = self.ror(a ,2 ) ^ self.ror(a ,13 ) ^ self.ror(a ,22 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0X1_0_0_0_0_0_0_0_0
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0X1_0_0_0_0_0_0_0_0),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0X1_0_0_0_0_0_0_0_0),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0X1_0_0_0_0_0_0_0_0)
                for index, element in enumerate(self.hashes )
            ]
        self.hash = ''.join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
    def ror( self ,value ,rotations ) -> int:
        '''simple docstring'''
        return 0XF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations)
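# Quick standalone sanity check of the 32-bit rotate-right used above (sketch,
# independent of the class): rotating moves the low bits to the top while
# keeping the result within 32 bits.
def ror32( value ,rotations ):
    return 0XF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations)

assert ror32(0X0_0_0_0_0_0_0_1 ,1 ) == 0X8_0_0_0_0_0_0_0
assert ror32(0X8_0_0_0_0_0_0_0 ,31 ) == 0X0_0_0_0_0_0_0_1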
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> None:
'''simple docstring'''
import hashlib
        msg = bytes('Test String' ,'utf-8' )
        self.assertEqual(SHAaaa(msg ).hash ,hashlib.sha256(msg ).hexdigest() )
def main():
    import doctest
    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
    parser.add_argument(
        '-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , 'rb' ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , 'utf-8' )
    print(SHAaaa(hash_input ).hash )
if __name__ == "__main__":
main()
| 321
|
"""simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock( *msgs ):
    with open(__file__ , 'r' ) as fh:
        fcntl.flock(fh , fcntl.LOCK_EX )
        try:
            print(*msgs )
        finally:
            fcntl.flock(fh , fcntl.LOCK_UN )
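# Why the flock: with many ranks printing at once, stdout lines interleave.
# Holding an exclusive lock on this source file while printing serializes the
# writes, so each rank's diagnostic line stays intact (illustration only):
#
#   with open(__file__, "r") as fh:
#       fcntl.flock(fh, fcntl.LOCK_EX)  # blocks until no other rank holds it
#       print("atomic line")
#       fcntl.flock(fh, fcntl.LOCK_UN)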
__SCREAMING_SNAKE_CASE =int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
__SCREAMING_SNAKE_CASE =torch.device("cuda", local_rank)
__SCREAMING_SNAKE_CASE =socket.gethostname()
__SCREAMING_SNAKE_CASE =F"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(F"{gpu} is OK (global rank: {rank}/{world_size})")
dist.barrier()
if rank == 0:
printflock(F"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")
except Exception:
printflock(F"{gpu} is broken")
raise
| 321
| 1
|
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
__SCREAMING_SNAKE_CASE =parse(importlib.metadata.version("torch"))
def compare_versions( library_or_version : Union[str, Version] , operation : str , requirement_version : str ):
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(F'''`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}''' )
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version , str ):
        library_or_version = parse(importlib.metadata.version(library_or_version ) )
    return operation(library_or_version , parse(requirement_version ) )
def is_torch_version( operation : str , version : str ):
    return compare_versions(torch_version , operation , version )
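# Usage sketch for the helpers above (results assume the stated installed
# versions, which are illustrative):
#
#   compare_versions(parse("2.0.1"), ">=", "1.13")  # -> True
#   compare_versions("numpy", "<", "2.0")           # True iff installed numpy < 2.0
#   is_torch_version(">=", "1.12")                  # True for torch 1.12 or newer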
| 321
|
"""simple docstring"""
class Node :
    def __init__( self ,name ,val ) -> None:
        '''simple docstring'''
        self.name = name
        self.val = val
def __str__( self ) -> Tuple:
'''simple docstring'''
return f'''{self.__class__.__name__}({self.name}, {self.val})'''
def __lt__( self ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
return self.val < other.val
class MinHeap :
    def __init__( self ,array ) -> None:
        '''simple docstring'''
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array )
def __getitem__( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
return self.get_value(__UpperCamelCase )
    def get_parent_idx( self ,idx ) -> int:
        '''simple docstring'''
        return (idx - 1) // 2
    def get_left_child_idx( self ,idx ) -> int:
        '''simple docstring'''
        return idx * 2 + 1
    def get_right_child_idx( self ,idx ) -> int:
        '''simple docstring'''
        return idx * 2 + 2
    def get_value( self ,key ) -> int:
        '''simple docstring'''
        return self.heap_dict[key]
    def build_heap( self ,array ) -> List[Node]:
        '''simple docstring'''
        last_idx = len(array ) - 1
        start_from = self.get_parent_idx(last_idx )
        for idx, i in enumerate(array ):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from ,-1 ,-1 ):
            self.sift_down(i ,array )
        return array
    def sift_down( self ,idx ,array ) -> None:
        '''simple docstring'''
        while True:
            l = self.get_left_child_idx(idx )  # noqa: E741
            r = self.get_right_child_idx(idx )
            smallest = idx
            if l < len(array ) and array[l] < array[idx]:
                smallest = l
            if r < len(array ) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx] , array[smallest] = array[smallest] , array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break
    def sift_up( self ,idx ) -> None:
        '''simple docstring'''
        p = self.get_parent_idx(idx )
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p] , self.heap[idx] = self.heap[idx] , self.heap[p]
            self.idx_of_element[self.heap[p]] , self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx )
    def peek( self ) -> Node:
        '''simple docstring'''
        return self.heap[0]
    def remove( self ) -> Node:
        '''simple docstring'''
        self.heap[0] , self.heap[-1] = self.heap[-1] , self.heap[0]
        self.idx_of_element[self.heap[0]] , self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0 ,self.heap )
        return x
    def insert( self ,node ) -> None:
        '''simple docstring'''
        self.heap.append(node )
        self.idx_of_element[node] = len(self.heap ) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap ) - 1 )
    def is_empty( self ) -> bool:
        '''simple docstring'''
        return len(self.heap ) == 0
    def decrease_key( self ,node ,new_value ) -> None:
        '''simple docstring'''
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than the current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node] )
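# Worked check of the 0-based index arithmetic used by the class above
# (standalone sketch): node i has children at 2*i + 1 and 2*i + 2, and child j
# has parent (j - 1) // 2, so the two maps invert each other.
for parent in range(10 ):
    left, right = parent * 2 + 1, parent * 2 + 2
    assert (left - 1) // 2 == parent and (right - 1) // 2 == parent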
__SCREAMING_SNAKE_CASE =Node("R", -1)
__SCREAMING_SNAKE_CASE =Node("B", 6)
__SCREAMING_SNAKE_CASE =Node("A", 3)
__SCREAMING_SNAKE_CASE =Node("X", 1)
__SCREAMING_SNAKE_CASE =Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
__SCREAMING_SNAKE_CASE =MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
print(i)
print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 321
| 1
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
__SCREAMING_SNAKE_CASE ={"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class UpperCamelCase ( unittest.TestCase ):
lowercase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowercase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowercase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowercase = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
lowercase_ : int = ZeroShotClassificationPipeline(
model=__UpperCamelCase ,tokenizer=__UpperCamelCase ,candidate_labels=['polics', 'health'] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
lowercase_ : Optional[int] = classifier('Who are you voting for in 2020?' ,candidate_labels='politics' )
self.assertEqual(__UpperCamelCase ,{'sequence': ANY(__UpperCamelCase ), 'labels': [ANY(__UpperCamelCase )], 'scores': [ANY(__UpperCamelCase )]} )
# No kwarg
lowercase_ : str = classifier('Who are you voting for in 2020?' ,['politics'] )
self.assertEqual(__UpperCamelCase ,{'sequence': ANY(__UpperCamelCase ), 'labels': [ANY(__UpperCamelCase )], 'scores': [ANY(__UpperCamelCase )]} )
lowercase_ : int = classifier('Who are you voting for in 2020?' ,candidate_labels=['politics'] )
self.assertEqual(__UpperCamelCase ,{'sequence': ANY(__UpperCamelCase ), 'labels': [ANY(__UpperCamelCase )], 'scores': [ANY(__UpperCamelCase )]} )
lowercase_ : str = classifier('Who are you voting for in 2020?' ,candidate_labels='politics, public health' )
self.assertEqual(
__UpperCamelCase ,{'sequence': ANY(__UpperCamelCase ), 'labels': [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )], 'scores': [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) ,1.0 )
lowercase_ : Any = classifier('Who are you voting for in 2020?' ,candidate_labels=['politics', 'public health'] )
self.assertEqual(
__UpperCamelCase ,{'sequence': ANY(__UpperCamelCase ), 'labels': [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )], 'scores': [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) ,1.0 )
lowercase_ : Dict = classifier(
'Who are you voting for in 2020?' ,candidate_labels='politics' ,hypothesis_template='This text is about {}' )
self.assertEqual(__UpperCamelCase ,{'sequence': ANY(__UpperCamelCase ), 'labels': [ANY(__UpperCamelCase )], 'scores': [ANY(__UpperCamelCase )]} )
# https://github.com/huggingface/transformers/issues/13846
lowercase_ : Dict = classifier(['I am happy'] ,['positive', 'negative'] )
self.assertEqual(
__UpperCamelCase ,[
{'sequence': ANY(__UpperCamelCase ), 'labels': [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )], 'scores': [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )]}
for i in range(1 )
] ,)
lowercase_ : Optional[Any] = classifier(['I am happy', 'I am sad'] ,['positive', 'negative'] )
self.assertEqual(
__UpperCamelCase ,[
{'sequence': ANY(__UpperCamelCase ), 'labels': [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )], 'scores': [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )]}
for i in range(2 )
] ,)
with self.assertRaises(__UpperCamelCase ):
classifier('' ,candidate_labels='politics' )
with self.assertRaises(__UpperCamelCase ):
classifier(__UpperCamelCase ,candidate_labels='politics' )
with self.assertRaises(__UpperCamelCase ):
classifier('Who are you voting for in 2020?' ,candidate_labels='' )
with self.assertRaises(__UpperCamelCase ):
classifier('Who are you voting for in 2020?' ,candidate_labels=__UpperCamelCase )
with self.assertRaises(__UpperCamelCase ):
classifier(
'Who are you voting for in 2020?' ,candidate_labels='politics' ,hypothesis_template='Not formatting template' ,)
with self.assertRaises(__UpperCamelCase ):
classifier(
'Who are you voting for in 2020?' ,candidate_labels='politics' ,hypothesis_template=__UpperCamelCase ,)
self.run_entailment_id(__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Union[str, Any] = zero_shot_classifier.model.config
lowercase_ : List[str] = config.labelaid
lowercase_ : Optional[int] = zero_shot_classifier.entailment_id
lowercase_ : List[str] = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}
self.assertEqual(zero_shot_classifier.entailment_id ,-1 )
lowercase_ : Tuple = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
self.assertEqual(zero_shot_classifier.entailment_id ,0 )
lowercase_ : List[str] = {'ENTAIL': 0, 'NON-ENTAIL': 1}
self.assertEqual(zero_shot_classifier.entailment_id ,0 )
lowercase_ : Union[str, Any] = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0}
self.assertEqual(zero_shot_classifier.entailment_id ,2 )
lowercase_ : Any = original_labelaid
self.assertEqual(__UpperCamelCase ,zero_shot_classifier.entailment_id )
@require_torch
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Dict = pipeline(
'zero-shot-classification' ,model='sshleifer/tiny-distilbert-base-cased-distilled-squad' ,framework='pt' ,)
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'Who are you voting for in 2020?' * 100 ,candidate_labels=['politics', 'public health', 'science'] )
@require_torch
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : str = pipeline(
'zero-shot-classification' ,model='sshleifer/tiny-distilbert-base-cased-distilled-squad' ,framework='pt' ,)
lowercase_ : Dict = zero_shot_classifier(
'Who are you voting for in 2020?' ,candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(__UpperCamelCase ) ,{
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.333, 0.333, 0.333],
} ,)
@require_tf
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : List[str] = pipeline(
'zero-shot-classification' ,model='sshleifer/tiny-distilbert-base-cased-distilled-squad' ,framework='tf' ,)
lowercase_ : int = zero_shot_classifier(
'Who are you voting for in 2020?' ,candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(__UpperCamelCase ) ,{
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.333, 0.333, 0.333],
} ,)
@slow
@require_torch
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Optional[int] = pipeline('zero-shot-classification' ,model='roberta-large-mnli' ,framework='pt' )
lowercase_ : Optional[int] = zero_shot_classifier(
'Who are you voting for in 2020?' ,candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(__UpperCamelCase ) ,{
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.976, 0.015, 0.009],
} ,)
lowercase_ : str = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
' large and limited training data.' ,candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] ,multi_label=__UpperCamelCase ,)
self.assertEqual(
nested_simplify(__UpperCamelCase ) ,{
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.817, 0.713, 0.018, 0.018],
} ,)
@slow
@require_tf
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Dict = pipeline('zero-shot-classification' ,model='roberta-large-mnli' ,framework='tf' )
lowercase_ : List[Any] = zero_shot_classifier(
'Who are you voting for in 2020?' ,candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(__UpperCamelCase ) ,{
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.976, 0.015, 0.009],
} ,)
lowercase_ : Optional[int] = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
' large and limited training data.' ,candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] ,multi_label=__UpperCamelCase ,)
self.assertEqual(
nested_simplify(__UpperCamelCase ) ,{
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.817, 0.713, 0.018, 0.018],
} ,)
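# Conceptual sketch of what the pipeline under test does: each candidate label
# is turned into an NLI hypothesis via the template (transformers defaults to
# "This example is {}."), paired with the input as premise, and the model's
# entailment scores are normalized across the labels.
premise = 'Who are you voting for in 2020?'
candidate_labels = ['politics', 'public health', 'science']
hypotheses = ['This example is {}.'.format(label ) for label in candidate_labels]
# -> one (premise, hypothesis) pair per label is scored by the NLI model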
| 321
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : List[Any] = tempfile.mkdtemp()
# fmt: off
lowercase_ : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
lowercase_ : int = dict(zip(__UpperCamelCase ,range(len(__UpperCamelCase ) ) ) )
lowercase_ : Union[str, Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
lowercase_ : Tuple = {'unk_token': '<unk>'}
lowercase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
lowercase_ : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(__UpperCamelCase ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(__UpperCamelCase ) )
lowercase_ : Any = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073],
'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
lowercase_ : List[str] = os.path.join(self.tmpdirname ,__UpperCamelCase )
with open(self.image_processor_file ,'w' ,encoding='utf-8' ) as fp:
json.dump(__UpperCamelCase ,__UpperCamelCase )
def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> str:
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname ,**__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Dict = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
lowercase_ : List[str] = [Image.fromarray(np.moveaxis(__UpperCamelCase ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : Optional[int] = self.get_tokenizer()
lowercase_ : List[Any] = self.get_rust_tokenizer()
lowercase_ : Tuple = self.get_image_processor()
lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowercase_ : Union[str, Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname ,use_fast=__UpperCamelCase )
lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowercase_ : str = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,__UpperCamelCase )
self.assertIsInstance(processor_fast.tokenizer ,__UpperCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,__UpperCamelCase )
self.assertIsInstance(processor_fast.image_processor ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase_ : List[Any] = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' )
lowercase_ : Any = self.get_image_processor(do_normalize=__UpperCamelCase ,padding_value=1.0 )
lowercase_ : Any = CLIPSegProcessor.from_pretrained(
self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=__UpperCamelCase ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,__UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : Dict = self.get_image_processor()
lowercase_ : List[str] = self.get_tokenizer()
lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : List[Any] = self.prepare_image_inputs()
lowercase_ : str = image_processor(__UpperCamelCase ,return_tensors='np' )
lowercase_ : Union[str, Any] = processor(images=__UpperCamelCase ,return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Dict = self.get_image_processor()
lowercase_ : List[Any] = self.get_tokenizer()
lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : Dict = 'lower newer'
lowercase_ : Any = processor(text=__UpperCamelCase )
lowercase_ : int = tokenizer(__UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : str = self.get_image_processor()
lowercase_ : str = self.get_tokenizer()
lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : List[Any] = 'lower newer'
lowercase_ : str = self.prepare_image_inputs()
lowercase_ : Optional[int] = processor(text=__UpperCamelCase ,images=__UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) ,['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(__UpperCamelCase ):
processor()
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : Tuple = self.get_image_processor()
lowercase_ : Optional[Any] = self.get_tokenizer()
lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : Optional[int] = self.prepare_image_inputs()
lowercase_ : Optional[Any] = self.prepare_image_inputs()
lowercase_ : int = processor(images=__UpperCamelCase ,visual_prompt=__UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'conditional_pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(__UpperCamelCase ):
processor()
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : List[str] = self.get_image_processor()
lowercase_ : Optional[Any] = self.get_tokenizer()
lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase_ : List[str] = processor.batch_decode(__UpperCamelCase )
lowercase_ : Optional[Any] = tokenizer.batch_decode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
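# A minimal sketch of the processor pattern exercised above: a processor is a
# thin wrapper that routes text to the tokenizer and images to the image
# processor, merging both outputs into one dict (illustrative only, not the
# library's actual implementation):
class ToyProcessor:
    def __init__( self ,tokenizer ,image_processor ):
        self.tokenizer = tokenizer
        self.image_processor = image_processor

    def __call__( self ,text=None ,images=None ,**kwargs ):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images.' )
        out = {}
        if text is not None:
            out.update(self.tokenizer(text ,**kwargs ) )
        if images is not None:
            out.update(self.image_processor(images ,**kwargs ) )
        return out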
| 321
| 1
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}
class UpperCamelCase ( PreTrainedTokenizerFast ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_INIT_CONFIGURATION
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ConvBertTokenizer
def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=True ,__UpperCamelCase="[UNK]" ,__UpperCamelCase="[SEP]" ,__UpperCamelCase="[PAD]" ,__UpperCamelCase="[CLS]" ,__UpperCamelCase="[MASK]" ,__UpperCamelCase=True ,__UpperCamelCase=None ,**__UpperCamelCase ,) -> str:
'''simple docstring'''
super().__init__(
__UpperCamelCase ,tokenizer_file=__UpperCamelCase ,do_lower_case=__UpperCamelCase ,unk_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,tokenize_chinese_chars=__UpperCamelCase ,strip_accents=__UpperCamelCase ,**__UpperCamelCase ,)
lowercase_ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,__UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' ,__UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,__UpperCamelCase ) != tokenize_chinese_chars
):
lowercase_ : str = getattr(__UpperCamelCase ,normalizer_state.pop('type' ) )
lowercase_ : Union[str, Any] = do_lower_case
lowercase_ : List[str] = strip_accents
lowercase_ : List[str] = tokenize_chinese_chars
lowercase_ : str = normalizer_class(**__UpperCamelCase )
lowercase_ : List[str] = do_lower_case
    def build_inputs_with_special_tokens( self ,token_ids_a ,token_ids_b=None ) -> List[int]:
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self ,token_ids_a ,token_ids_b = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self ,save_directory ,filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
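# Worked example for create_token_type_ids_from_sequences above (standalone):
# [CLS] A [SEP] gets segment id 0 and B [SEP] gets segment id 1.
token_ids_a, token_ids_b, sep, cls = [7, 8], [9], [102], [101]
type_ids = len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
assert type_ids == [0, 0, 0, 0, 1, 1]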
| 321
|
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
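# The guarded import above keeps the module importable even when optional
# backends are missing. A minimal sketch of the same pattern (names are
# illustrative, not the library's actual helpers):
try:
    import torch  # optional heavy dependency
    _TORCH_AVAILABLE = True
except ImportError:
    _TORCH_AVAILABLE = False

if _TORCH_AVAILABLE:
    pass  # only now import modules that need torch at import time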
| 321
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch( openai_checkpoint_folder_path , openai_config_file , pytorch_dump_folder_path ):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file )
    model = OpenAIGPTModel(config )
    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model , config , openai_checkpoint_folder_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F'''Save configuration file to {pytorch_config_dump_path}''' )
    with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--openai_checkpoint_folder_path",
default=None,
type=str,
required=True,
help="Path to the TensorFlow checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--openai_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
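# Example invocation of the converter above (paths are placeholders):
#
#   python convert_openai_original_tf_checkpoint_to_pytorch.py \
#       --openai_checkpoint_folder_path /path/to/openai/tf/checkpoint \
#       --pytorch_dump_folder_path /path/to/output
#
# The script name here is assumed from the transformers conversion-script
# convention; adjust it to wherever this file actually lives.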
| 321
|
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class UpperCamelCase :
    def __init__( self ,parent ,batch_size=13 ,seq_length=7 ,is_training=True ,use_input_mask=True ,vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=50 ,initializer_range=0.02 ,use_labels=True ,scope=None ,) -> List[str]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope
    def prepare_config_and_inputs( self ) -> str:
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        config = self.get_config()
        return config, input_ids, input_mask, token_labels
    def get_config( self ) -> Union[str, Any]:
        '''simple docstring'''
        return BertGenerationConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,is_decoder=False ,initializer_range=self.initializer_range ,)
    def prepare_config_and_inputs_for_decoder( self ) -> Any:
        '''simple docstring'''
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model( self ,config ,input_ids ,input_mask ,token_labels ,**kwargs ) -> Any:
        '''simple docstring'''
        model = BertGenerationEncoder(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self ,config ,input_ids ,input_mask ,token_labels ,encoder_hidden_states ,encoder_attention_mask ,**kwargs ) -> Optional[Any]:
        '''simple docstring'''
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids ,attention_mask=input_mask ,encoder_hidden_states=encoder_hidden_states ,encoder_attention_mask=encoder_attention_mask ,)
        result = model(
            input_ids ,attention_mask=input_mask ,encoder_hidden_states=encoder_hidden_states ,)
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_decoder_model_past_large_inputs( self ,config ,input_ids ,input_mask ,token_labels ,encoder_hidden_states ,encoder_attention_mask ,**kwargs ) -> int:
        '''simple docstring'''
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config ).to(torch_device ).eval()
        # first forward pass
        outputs = model(
            input_ids ,attention_mask=input_mask ,encoder_hidden_states=encoder_hidden_states ,encoder_attention_mask=encoder_attention_mask ,use_cache=True ,)
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) ,config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens] ,dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] ,dim=-1 )
        output_from_no_past = model(
            next_input_ids ,attention_mask=next_attention_mask ,encoder_hidden_states=encoder_hidden_states ,encoder_attention_mask=encoder_attention_mask ,output_hidden_states=True ,)['hidden_states'][0]
        output_from_past = model(
            next_tokens ,attention_mask=next_attention_mask ,encoder_hidden_states=encoder_hidden_states ,encoder_attention_mask=encoder_attention_mask ,past_key_values=past_key_values ,output_hidden_states=True ,)['hidden_states'][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice ,output_from_no_past_slice ,atol=1e-3 ) )
    def create_and_check_for_causal_lm( self ,config ,input_ids ,input_mask ,token_labels ,*args ,) -> Union[str, Any]:
        '''simple docstring'''
        model = BertGenerationDecoder(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,labels=token_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common( self ) -> Optional[Any]:
        '''simple docstring'''
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class BertGenerationEncoderTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder}
        if is_torch_available()
        else {}
    )
    def setUp( self ) -> Tuple:
        '''simple docstring'''
        self.model_tester = BertGenerationEncoderTester(self )
        self.config_tester = ConfigTester(self ,config_class=BertGenerationConfig ,hidden_size=37 )
    def test_config( self ) -> Optional[Any]:
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model( self ) -> List[str]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_as_bert( self ) -> List[str]:
        '''simple docstring'''
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = 'bert'
        self.model_tester.create_and_check_model(config ,input_ids ,input_mask ,token_labels )
    def test_model_as_decoder( self ) -> Optional[int]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )
    def test_decoder_model_past_with_large_inputs( self ) -> Dict:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs )
    def test_model_as_decoder_with_default_input_mask( self ) -> List[str]:
        '''simple docstring'''
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config ,input_ids ,input_mask ,token_labels ,encoder_hidden_states ,encoder_attention_mask ,)
    def test_for_causal_lm( self ) -> Optional[Any]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ) -> Tuple:
        '''simple docstring'''
        model = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
        self.assertIsNotNone(model )
@require_torch
class BertGenerationEncoderIntegrationTest( unittest.TestCase ):
    @slow
    def test_inference_no_head_absolute_embedding( self ) -> int:
        '''simple docstring'''
        model = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size([1, 8, 1024] )
        self.assertEqual(output.shape ,expected_shape )
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] ,expected_slice ,atol=1e-4 ) )
@require_torch
class BertGenerationDecoderIntegrationTest( unittest.TestCase ):
    @slow
    def test_inference_no_head_absolute_embedding( self ) -> Optional[int]:
        '''simple docstring'''
        model = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size([1, 8, 5_0358] )
        self.assertEqual(output.shape ,expected_shape )
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] ,expected_slice ,atol=1e-4 ) )
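# A hedged sketch of how the two halves tested above are typically combined: tie them
# into an EncoderDecoderModel. The [CLS]/[SEP] token ids 101/102 are an assumption
# carried over from the underlying BERT vocabulary, and this downloads large weights.
if __name__ == "__main__":
    from transformers import EncoderDecoderModel
    encoder = BertGenerationEncoder.from_pretrained(
        'google/bert_for_seq_generation_L-24_bbc_encoder' ,bos_token_id=101 ,eos_token_id=102 )
    decoder = BertGenerationDecoder.from_pretrained(
        'google/bert_for_seq_generation_L-24_bbc_encoder' ,add_cross_attention=True ,is_decoder=True ,bos_token_id=101 ,eos_token_id=102 )
    seq2seq = EncoderDecoderModel(encoder=encoder ,decoder=decoder )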
"""simple docstring"""
import math
import random
def sigmoid_function( value : float , deriv : bool = False ):
    """Return the sigmoid of `value`, or its derivative when `deriv` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value ))
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation( expected : int , number_propagations : int ):
    """Train a single weight toward `expected` and return the learned output * 100."""
    # Random weight
    weight = float(2 * (random.randint(1 , 1_00 )) - 1 )
    for _ in range(number_propagations ):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight )
        # How much did we miss?
        layer_1_error = (expected / 1_00) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1 , True )
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 1_00
if __name__ == "__main__":
import doctest
doctest.testmod()
    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
print(forward_propagation(expected, number_propagations))
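# Worked numbers for a single update step, assuming expected=50 and an initial
# weight of 1.0 (both illustrative): layer_1 = sigmoid(0.02 * 1.0) ~= 0.505, the
# miss is 0.5 - 0.505 = -0.005, and the delta scales that miss by the sigmoid
# derivative layer_1 * (1 - layer_1) ~= 0.25, so the weight moves by roughly
# 0.02 * (-0.00125). Repeated propagations drive layer_1 * 100 toward `expected`.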
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward( self ,input_ids ,token_type_ids ,attention_mask ) -> int:
        '''simple docstring'''
        return None
class FuncNonContiguousArgs:
    def forward( self ,input_ids ,some_other_args ,token_type_ids ,attention_mask ) -> str:
        '''simple docstring'''
        return None
class OnnxExportTestCase( unittest.TestCase ):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ('bert-base-cased', {}),
        ('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
    ]
    @require_tf
    @slow
    def test_export_tensorflow( self ) -> str:
        '''simple docstring'''
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model ,'tf' ,12 ,**model_kwargs )
    @require_torch
    @slow
    def test_export_pytorch( self ) -> List[Any]:
        '''simple docstring'''
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model ,'pt' ,12 ,**model_kwargs )
    @require_torch
    @slow
    def test_export_custom_bert_model( self ) -> Optional[int]:
        '''simple docstring'''
        from transformers import BertModel
        vocab = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
        with NamedTemporaryFile(mode='w+t' ) as vocab_file:
            vocab_file.write('\n'.join(vocab ) )
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name )
            with TemporaryDirectory() as bert_save_dir:
                model = BertModel(BertConfig(vocab_size=len(vocab ) ) )
                model.save_pretrained(bert_save_dir )
                self._test_export(bert_save_dir ,'pt' ,12 ,tokenizer )
    @require_tf
    @slow
    def test_quantize_tf( self ) -> Tuple:
        '''simple docstring'''
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model ,'tf' ,12 ,**model_kwargs )
            quantized_path = quantize(Path(path ) )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
                self.fail('Quantized model is bigger than initial ONNX model' )
    @require_torch
    @slow
    def test_quantize_pytorch( self ) -> List[str]:
        '''simple docstring'''
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model ,'pt' ,12 ,**model_kwargs )
            quantized_path = quantize(path )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
                self.fail('Quantized model is bigger than initial ONNX model' )
    def _test_export( self ,model ,framework ,opset ,tokenizer=None ,**model_kwargs ) -> Optional[int]:
        '''simple docstring'''
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir ).joinpath('model.onnx' )
            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()
            # Export
            convert(framework ,model ,path ,opset ,tokenizer ,**model_kwargs )
            return path
        except Exception as e:
            self.fail(e )
    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch( self ) -> Any:
        '''simple docstring'''
        from transformers import BertModel
        model = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
        tokenizer = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
        self._test_infer_dynamic_axis(model ,tokenizer ,'pt' )
    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf( self ) -> str:
        '''simple docstring'''
        from transformers import TFBertModel
        model = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
        tokenizer = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
        self._test_infer_dynamic_axis(model ,tokenizer ,'tf' )
    def _test_infer_dynamic_axis( self ,model ,tokenizer ,framework ) -> Dict:
        '''simple docstring'''
        nlp = FeatureExtractionPipeline(model ,tokenizer )
        variable_names = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp ,framework )
        # Assert all variables are present
        self.assertEqual(len(shapes ) ,len(variable_names ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] ,input_vars )
        self.assertSequenceEqual(variable_names[3:] ,output_vars )
        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name] ,{0: 'batch', 1: 'sequence'} )
        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes['output_0'] ,{0: 'batch', 1: 'sequence'} )
        self.assertDictEqual(shapes['output_1'] ,{0: 'batch'} )
    def test_ensure_valid_input( self ) -> Any:
        '''simple docstring'''
        input_names = ['input_ids', 'attention_mask', 'token_type_ids']
        tokens = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs() ,tokens ,input_names )
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args ) ,3 )
        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names ) ,set(input_names ) )
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args ,(tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs() ,tokens ,input_names )
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args ) ,1 )
        self.assertEqual(len(ordered_input_names ) ,1 )
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0] ,tokens['input_ids'] )
        self.assertEqual(ordered_input_names[0] ,'input_ids' )
    def test_generate_identified_name( self ) -> Union[str, Any]:
        '''simple docstring'''
        generated = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) ,'-test' )
        self.assertEqual('/home/something/my_fake_model-test.onnx' ,generated.as_posix() )
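# A hedged sketch of driving the converter under test directly; the output path is a
# placeholder and the opset (12) simply mirrors what the tests above pass. convert()
# expects the output folder to be empty, hence a fresh path is used here.
if __name__ == "__main__":
    convert('pt', 'bert-base-cased', Path('/tmp/bert-export/model.onnx'), 12)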
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class XGLMConfig( PretrainedConfig ):
    model_type = 'xglm'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_attention_heads': 'attention_heads',
        'hidden_size': 'd_model',
        'num_hidden_layers': 'num_layers',
    }
    def __init__( self ,vocab_size=25_6008 ,max_position_embeddings=2048 ,d_model=1024 ,ffn_dim=4096 ,num_layers=24 ,attention_heads=16 ,activation_function="gelu" ,dropout=0.1 ,attention_dropout=0.1 ,activation_dropout=0.0 ,layerdrop=0.0 ,init_std=0.02 ,scale_embedding=True ,use_cache=True ,decoder_start_token_id=2 ,pad_token_id=1 ,bos_token_id=0 ,eos_token_id=2 ,**kwargs ,) -> List[str]:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,decoder_start_token_id=decoder_start_token_id ,**kwargs ,)
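# A quick demonstration of the attribute_map above; the tiny sizes are arbitrary and
# exist only to keep the example cheap.
if __name__ == "__main__":
    cfg = XGLMConfig(d_model=64, num_layers=2, attention_heads=4, ffn_dim=128)
    print(cfg.hidden_size)        # 64 -- routed to d_model by attribute_map
    print(cfg.num_hidden_layers)  # 2  -- routed to num_layers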
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase ( unittest.TestCase ):
    def test_input_types( self ) -> Any:
        '''simple docstring'''
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset )
        self.assertTrue(isinstance(dc.token_ids ,list ) )
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
    def test_check_illegal_input( self ) -> Any:
        '''simple docstring'''
        # We can't have constraints that are complete subsets of one another.
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(cset ) # fails here
    def test_example_progression( self ) -> str:
        '''simple docstring'''
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset )
        stepped, completed, reset = dc.update(1 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        stepped, completed, reset = dc.update(2 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped, completed, reset = dc.update(3 )
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )
    def test_example_progression_unequal_three_mid_and_reset( self ) -> Any:
        '''simple docstring'''
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset )
        stepped, completed, reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        stepped, completed, reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped, completed, reset = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )
        stepped, completed, reset = dc.update(5 )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
        dc.reset()
        stepped, completed, reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )
        stepped, completed, reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped, completed, reset = dc.update(5 )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
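# A standalone recap of the trie walk exercised above (assumes torch is installed;
# the token ids are arbitrary):
if __name__ == "__main__":
    dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    for token in (1, 2, 4):
        stepped, completed, reset = dc.update(token)
    assert dc.completed  # the [1, 2, 4] branch was fully matched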
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class AlbertConfig( PretrainedConfig ):
    model_type = 'albert'
    def __init__( self ,vocab_size=3_0000 ,embedding_size=128 ,hidden_size=4096 ,num_hidden_layers=12 ,num_hidden_groups=1 ,num_attention_heads=64 ,intermediate_size=1_6384 ,inner_group_num=1 ,hidden_act="gelu_new" ,hidden_dropout_prob=0 ,attention_probs_dropout_prob=0 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1e-12 ,classifier_dropout_prob=0.1 ,position_embedding_type="absolute" ,pad_token_id=0 ,bos_token_id=2 ,eos_token_id=3 ,**kwargs ,) -> Any:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig( OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
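# The defaults above describe the ALBERT xxlarge architecture; a hedged sketch of
# shrinking it for experiments (the sizes here are arbitrary):
if __name__ == "__main__":
    tiny = AlbertConfig(hidden_size=128, intermediate_size=256, num_attention_heads=4)
    print(tiny.num_hidden_groups)  # 1 -- all 12 layers share one parameter group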
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch( tf_checkpoint_path : str , config_path : str , pytorch_dump_path : str ):
    def get_masked_lm_array(name : str ):
        full_name = F'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
    def get_encoder_array(name : str ):
        full_name = F'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
    def get_encoder_layer_array(layer_index : int , name : str ):
        full_name = F'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
    def get_encoder_attention_layer_array(layer_index : int , name : str , original_shape ):
        full_name = F'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        array = array.reshape(original_shape )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
    print(F'''Loading model based on config from {config_path}...''' )
    config = BertConfig.from_json_file(config_path )
    model = BertForMaskedLM(config )
    # Layers
    for layer_index in range(0 , config.num_hidden_layers ):
        layer: BertLayer = model.bert.encoder.layer[layer_index]
        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index , '_query_dense/kernel' , self_attn.query.weight.data.shape )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index , '_query_dense/bias' , self_attn.query.bias.data.shape )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index , '_key_dense/kernel' , self_attn.key.weight.data.shape )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index , '_key_dense/bias' , self_attn.key.bias.data.shape )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index , '_value_dense/kernel' , self_attn.value.weight.data.shape )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index , '_value_dense/bias' , self_attn.value.bias.data.shape )
        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index , '_output_dense/kernel' , self_output.dense.weight.data.shape )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index , '_output_dense/bias' , self_output.dense.bias.data.shape )
        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index , '_attention_layer_norm/gamma' )
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index , '_attention_layer_norm/beta' )
        # Intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.weight.data = get_encoder_layer_array(layer_index , '_intermediate_dense/kernel' )
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index , '_intermediate_dense/bias' )
        # Output
        bert_output: BertOutput = layer.output
        bert_output.dense.weight.data = get_encoder_layer_array(layer_index , '_output_dense/kernel' )
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index , '_output_dense/bias' )
        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index , '_output_layer_norm/gamma' )
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index , '_output_layer_norm/beta' )
    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array('_position_embedding_layer/embeddings' )
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array('_type_embedding_layer/embeddings' )
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array('_embedding_norm_layer/gamma' )
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array('_embedding_norm_layer/beta' )
    # LM Head
    lm_head = model.cls.predictions.transform
    lm_head.dense.weight.data = get_masked_lm_array('dense/kernel' )
    lm_head.dense.bias.data = get_masked_lm_array('dense/bias' )
    lm_head.LayerNorm.weight.data = get_masked_lm_array('layer_norm/gamma' )
    lm_head.LayerNorm.bias.data = get_masked_lm_array('layer_norm/beta' )
    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array('embedding_table' )
    # Pooling
    model.bert.pooler = BertPooler(config=config )
    model.bert.pooler.dense.weight.data = get_encoder_array('_pooler_layer/kernel' )
    model.bert.pooler.dense.bias.data = get_encoder_array('_pooler_layer/bias' )
    # Export final model
    model.save_pretrained(pytorch_dump_path )
    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path )
    print(new_model.eval() )
    print('Model conversion was done successfully!' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
    args = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
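# Illustrative invocation (the script name and all paths are hypothetical placeholders):
#
#   python convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /tmp/token-dropping-ckpt \
#       --bert_config_file /tmp/bert_config.json \
#       --pytorch_dump_path /tmp/token-dropping-pt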
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process( position , value , l_send , r_send , lr_cv , rr_cv , result_pipe ):
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0 , 10 ):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value )
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value , temp )
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value )
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value , temp )
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value )
def odd_even_transposition( arr ):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe() )
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1 , len(arr ) - 1 ):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process , args=(
                len(arr ) - 1,
                arr[len(arr ) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr ) - 1],
            ) , ) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0 , len(result_pipe ) ):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10 , 0 , -1 ) )
    print('Initial List' )
    print(*arr )
    arr = odd_even_transposition(arr )
    print('Sorted List\n' )
    print(*arr )
if __name__ == "__main__":
main()
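# A quick sanity check under the same entry point (each element gets its own
# process, so keep the list small):
#   odd_even_transposition([3, 1, 2])  ->  [1, 2, 3]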
"""simple docstring"""
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")
def covid_stats( url : str = "https://www.worldometers.info/coronavirus/" ):
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url ).content ).xpath(xpath_str ) )
fmt = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
"TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRemBertForCausalLM",
"TFRemBertForMaskedLM",
"TFRemBertForMultipleChoice",
"TFRemBertForQuestionAnswering",
"TFRemBertForSequenceClassification",
"TFRemBertForTokenClassification",
"TFRemBertLayer",
"TFRemBertModel",
"TFRemBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
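# A hedged sketch of the GLUE processors re-exported above; the data_dir is a
# hypothetical placeholder for a locally downloaded MRPC folder.
if __name__ == "__main__":
    processor = glue_processors['mrpc']()
    examples = processor.get_train_examples('/path/to/MRPC')
    print(examples[0])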
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig( PretrainedConfig ):
    model_type = 'detr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
    def __init__( self ,use_timm_backbone=True ,backbone_config=None ,num_channels=3 ,num_queries=100 ,encoder_layers=6 ,encoder_ffn_dim=2048 ,encoder_attention_heads=8 ,decoder_layers=6 ,decoder_ffn_dim=2048 ,decoder_attention_heads=8 ,encoder_layerdrop=0.0 ,decoder_layerdrop=0.0 ,is_encoder_decoder=True ,activation_function="relu" ,d_model=256 ,dropout=0.1 ,attention_dropout=0.0 ,activation_dropout=0.0 ,init_std=0.02 ,init_xavier_std=1.0 ,auxiliary_loss=False ,position_embedding_type="sine" ,backbone="resnet50" ,use_pretrained_backbone=True ,dilation=False ,class_cost=1 ,bbox_cost=5 ,giou_cost=2 ,mask_loss_coefficient=1 ,dice_loss_coefficient=1 ,bbox_loss_coefficient=5 ,giou_loss_coefficient=2 ,eos_coefficient=0.1 ,**kwargs ,) -> List[Any]:
        '''simple docstring'''
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
            elif isinstance(backbone_config ,dict ):
                backbone_model_type = backbone_config.get('model_type' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder ,**kwargs )
    @property
    def num_attention_heads( self ) -> int:
        '''simple docstring'''
        return self.encoder_attention_heads
    @property
    def hidden_size( self ) -> int:
        '''simple docstring'''
        return self.d_model
    @classmethod
    def from_backbone_config( cls ,backbone_config ,**kwargs ):
        '''simple docstring'''
        return cls(backbone_config=backbone_config ,**kwargs )
    def to_dict( self ) -> Dict[str, any]:
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
class DetrOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('1.11' )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('pixel_mask', {0: 'batch'}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        '''simple docstring'''
        return 1e-5
    @property
    def default_onnx_opset( self ) -> int:
        '''simple docstring'''
        return 12
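# The backbone fallback and the properties above in action (config-only, no weights):
if __name__ == "__main__":
    cfg = DetrConfig(use_timm_backbone=False)  # falls back to a ResNet "stage4" backbone config
    print(cfg.num_attention_heads)         # 8 -- the property returns encoder_attention_heads
    print(cfg.backbone_config.model_type)  # "resnet"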
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester :
    def __init__( self ,parent ,batch_size=13 ,seq_length=7 ,is_training=False ,use_input_mask=True ,use_token_type_ids=False ,use_labels=True ,vocab_size=33 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=16 ,type_sequence_label_size=2 ,initializer_range=0.02 ,num_labels=3 ,num_choices=4 ,scope=None ,) -> List[Any]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ) -> Any:
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            choice_labels = ids_tensor([self.batch_size] ,self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ) -> List[str]:
        '''simple docstring'''
        return EsmConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
    def create_and_check_model( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ) -> Tuple:
        '''simple docstring'''
        model = EsmModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask )
        result = model(input_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
    def create_and_check_for_masked_lm( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ) -> Union[str, Any]:
        '''simple docstring'''
        model = EsmForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,labels=token_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_token_classification( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ) -> Union[str, Any]:
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,labels=token_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ) -> str:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class EsmModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    test_mismatched_shapes = False
    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            'feature-extraction': EsmModel,
            'fill-mask': EsmForMaskedLM,
            'text-classification': EsmForSequenceClassification,
            'token-classification': EsmForTokenClassification,
            'zero-shot': EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True
    def setUp( self ) -> List[str]:
        '''simple docstring'''
        self.model_tester = EsmModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=EsmConfig ,hidden_size=37 )
    def test_config( self ) -> Any:
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model( self ) -> Optional[Any]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ) -> List[str]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ) -> int:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_token_classification( self ) -> Dict:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ) -> str:
        '''simple docstring'''
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_create_position_ids_respects_padding_index( self ) -> List[Any]:
        '''simple docstring'''
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config )
        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ] )
        position_ids = create_position_ids_from_input_ids(input_ids ,model.padding_idx )
        self.assertEqual(position_ids.shape ,expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(position_ids ,expected_positions ) ) )
    def test_create_position_ids_from_inputs_embeds( self ) -> Union[str, Any]:
        '''simple docstring'''
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config )
        inputs_embeds = torch.empty(2 ,4 ,30 )
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions] )
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds )
        self.assertEqual(position_ids.shape ,expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(position_ids ,expected_positions ) ) )
@unittest.skip('Esm does not support embedding resizing' )
    def test_resize_embeddings_untied( self ) -> str:
'''simple docstring'''
pass
@unittest.skip('Esm does not support embedding resizing' )
    def test_resize_tokens_embeddings( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@require_torch
class EsmModelIntegrationTest( TestCasePlus ):
    @slow
    def test_inference_masked_lm( self ) -> Optional[Any]:
        '''simple docstring'''
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
            output = model(input_ids )[0]
            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size) )
            self.assertEqual(output.shape ,expected_shape )
            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] ,expected_slice ,atol=1e-4 ) )
    @slow
    def test_inference_no_head( self ) -> Union[str, Any]:
        '''simple docstring'''
        with torch.no_grad():
            model = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
            output = model(input_ids )[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] ,expected_slice ,atol=1e-4 ) )
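# A standalone recap of the padding-aware position ids checked above (1 plays the
# role of padding_idx; the other ids are arbitrary):
if __name__ == "__main__":
    ids = torch.tensor([[12, 31, 13, 1]])
    print(create_position_ids_from_input_ids(ids, padding_idx=1))
    # tensor([[2, 3, 4, 1]]): real tokens count up from padding_idx + 1, pads stay put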
"""simple docstring"""
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] ):
lowercase_ : Tuple = multiprocessing.Manager()
lowercase_ : str = manager.list()
lowercase_ : List[str] = multiprocessing.Process(target=__SCREAMING_SNAKE_CASE , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('timed out' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str ):
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
lowercase_ : List[Any] = shutil.rmtree
lowercase_ : Optional[Any] = os.rmdir
lowercase_ : Tuple = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
lowercase_ : Union[str, Any] = {}
with swallow_io():
with time_limit(__SCREAMING_SNAKE_CASE ):
exec(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
result.append('passed' )
except TimeoutException:
result.append('timed out' )
except BaseException as e:
result.append(F'''failed: {e}''' )
# Needed for cleaning up.
lowercase_ : List[str] = rmtree
lowercase_ : List[str] = rmdir
lowercase_ : Dict = chdir
@contextlib.contextmanager
def lowercase__( __SCREAMING_SNAKE_CASE : Any ):
def signal_handler(__SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int ):
raise TimeoutException('Timed out!' )
signal.setitimer(signal.ITIMER_REAL , __SCREAMING_SNAKE_CASE )
signal.signal(signal.SIGALRM , __SCREAMING_SNAKE_CASE )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
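# A minimal usage sketch for the SIGALRM-based limit above (assumption: the
# context manager is named `time_limit` and takes seconds as a float):
#
#   with time_limit(2.0):
#       run_untrusted_snippet()   # raises TimeoutException after ~2 seconds
#
# Note that signal.setitimer/SIGALRM only works in the main thread on Unix.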
@contextlib.contextmanager
def lowercase__( ):
lowercase_ : Tuple = WriteOnlyStringIO()
with contextlib.redirect_stdout(__SCREAMING_SNAKE_CASE ):
with contextlib.redirect_stderr(__SCREAMING_SNAKE_CASE ):
with redirect_stdin(__SCREAMING_SNAKE_CASE ):
yield
@contextlib.contextmanager
def lowercase__( ):
with tempfile.TemporaryDirectory() as dirname:
with chdir(__SCREAMING_SNAKE_CASE ):
yield dirname
class UpperCamelCase ( lowercase_ ):
pass
class UpperCamelCase ( io.StringIO ):
def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
raise OSError
def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> int:
'''simple docstring'''
raise OSError
def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
raise OSError
def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
return False
class UpperCamelCase ( contextlib._RedirectStream ): # type: ignore
lowercase = 'stdin'
@contextlib.contextmanager
def lowercase__( __SCREAMING_SNAKE_CASE : Dict ):
if root == ".":
yield
return
lowercase_ : Any = os.getcwd()
os.chdir(__SCREAMING_SNAKE_CASE )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(__SCREAMING_SNAKE_CASE )
def lowercase__( __SCREAMING_SNAKE_CASE : int=None ):
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
lowercase_ : List[Any] = None
lowercase_ : str = None
import os
lowercase_ : Any = '1'
lowercase_ : Optional[int] = None
lowercase_ : Dict = None
lowercase_ : Union[str, Any] = None
lowercase_ : List[Any] = None
lowercase_ : Optional[int] = None
lowercase_ : Union[str, Any] = None
lowercase_ : Union[str, Any] = None
lowercase_ : Optional[Any] = None
lowercase_ : List[Any] = None
lowercase_ : List[Any] = None
lowercase_ : Union[str, Any] = None
lowercase_ : Tuple = None
lowercase_ : List[Any] = None
lowercase_ : int = None
lowercase_ : Optional[Any] = None
lowercase_ : Any = None
lowercase_ : List[Any] = None
lowercase_ : List[str] = None
lowercase_ : int = None
lowercase_ : Tuple = None
lowercase_ : Optional[int] = None
lowercase_ : List[str] = None
lowercase_ : Any = None
lowercase_ : Dict = None
lowercase_ : Optional[int] = None
lowercase_ : Union[str, Any] = None
lowercase_ : Optional[int] = None
import shutil
lowercase_ : Optional[int] = None
lowercase_ : int = None
lowercase_ : Optional[int] = None
import subprocess
lowercase_ : Optional[int] = None # type: ignore
lowercase_ : Dict = None
import sys
lowercase_ : List[Any] = None
lowercase_ : Tuple = None
lowercase_ : Union[str, Any] = None
lowercase_ : Optional[int] = None
lowercase_ : List[Any] = None
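# The wall of `None` assignments above is the sandboxing core: in the upstream
# OpenAI execution module each line nulls out one destructive callable, roughly
#
#   builtins.exit = builtins.quit = None
#   os.kill = os.system = os.remove = os.rmdir = os.chdir = ... = None
#   shutil.rmtree = shutil.move = shutil.chown = None
#   subprocess.Popen = None
#   sys.modules["ipdb"] = sys.modules["joblib"] = sys.modules["psutil"] = None
#
# so that exec'd candidate code cannot touch the filesystem, spawn processes,
# or import a debugger. The exact attribute list is an assumption based on the
# upstream source; the obfuscation here has collapsed the attribute names.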
| 321
|
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=0.2 ,__UpperCamelCase=0.2 ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Optional[int] = bp_numa
lowercase_ : Dict = bp_numa
lowercase_ : Tuple = bp_numa
lowercase_ : List[Any] = conva_get[:2]
lowercase_ : int = conva_get[2]
lowercase_ : Dict = size_pa
lowercase_ : int = rate_w
lowercase_ : Union[str, Any] = rate_t
lowercase_ : Dict = [
np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
lowercase_ : str = -2 * np.random.rand(self.conva[1] ) + 1
lowercase_ : Tuple = -2 * np.random.rand(self.num_bpa ) + 1
lowercase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ : int = {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
with open(__UpperCamelCase ,'wb' ) as f:
pickle.dump(__UpperCamelCase ,__UpperCamelCase )
print(f'''Model saved: {save_path}''' )
@classmethod
def _UpperCAmelCase ( cls ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
with open(__UpperCamelCase ,'rb' ) as f:
lowercase_ : Any = pickle.load(__UpperCamelCase ) # noqa: S301
lowercase_ : str = model_dic.get('conv1' )
conv_get.append(model_dic.get('step_conv1' ) )
lowercase_ : Union[str, Any] = model_dic.get('size_pooling1' )
lowercase_ : Optional[Any] = model_dic.get('num_bp1' )
lowercase_ : str = model_dic.get('num_bp2' )
lowercase_ : Optional[Any] = model_dic.get('num_bp3' )
lowercase_ : Union[str, Any] = model_dic.get('rate_weight' )
lowercase_ : Optional[int] = model_dic.get('rate_thre' )
# create model instance
lowercase_ : Any = CNN(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# modify model parameter
lowercase_ : Optional[Any] = model_dic.get('w_conv1' )
lowercase_ : Tuple = model_dic.get('wkj' )
lowercase_ : Union[str, Any] = model_dic.get('vji' )
lowercase_ : Optional[Any] = model_dic.get('thre_conv1' )
lowercase_ : Dict = model_dic.get('thre_bp2' )
lowercase_ : Optional[int] = model_dic.get('thre_bp3' )
return conv_ins
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any:
'''simple docstring'''
return 1 / (1 + np.exp(-1 * x ))
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
return round(__UpperCamelCase ,3 )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
lowercase_ : Dict = convs[0]
lowercase_ : Any = convs[1]
lowercase_ : Optional[Any] = np.shape(__UpperCamelCase )[0]
# get the data slice of original image data, data_focus
lowercase_ : Tuple = []
for i_focus in range(0 ,size_data - size_conv + 1 ,__UpperCamelCase ):
for j_focus in range(0 ,size_data - size_conv + 1 ,__UpperCamelCase ):
lowercase_ : List[Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__UpperCamelCase )
# calculate the feature map of every kernel, and save them as a list of matrices
lowercase_ : Dict = []
lowercase_ : Dict = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(__UpperCamelCase ):
lowercase_ : Tuple = []
for i_focus in range(len(__UpperCamelCase ) ):
lowercase_ : Optional[int] = (
np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(__UpperCamelCase ) )
lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase ).reshape(
__UpperCamelCase ,__UpperCamelCase )
data_featuremap.append(__UpperCamelCase )
# expand the data slices to one dimension
lowercase_ : Optional[int] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__UpperCamelCase ) )
lowercase_ : str = np.asarray(__UpperCamelCase )
return focus_list, data_featuremap
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase="average_pool" ) -> Tuple:
'''simple docstring'''
lowercase_ : Union[str, Any] = len(featuremaps[0] )
lowercase_ : str = int(size_map / size_pooling )
lowercase_ : Optional[int] = []
for i_map in range(len(__UpperCamelCase ) ):
lowercase_ : int = featuremaps[i_map]
lowercase_ : List[str] = []
for i_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
for j_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : List[str] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__UpperCamelCase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__UpperCamelCase ) )
lowercase_ : Dict = np.asmatrix(__UpperCamelCase ).reshape(__UpperCamelCase ,__UpperCamelCase )
featuremap_pooled.append(__UpperCamelCase )
return featuremap_pooled
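# A small worked example for the pooling above (assumption: the method is
# pooling(featuremaps, size_pooling)), on one 4x4 feature map with a 2x2
# average-pooling window:
#
#   fmap = np.asmatrix([[1, 2, 3, 4],
#                       [5, 6, 7, 8],
#                       [9, 10, 11, 12],
#                       [13, 14, 15, 16]])
#   cnn.pooling([fmap], 2)
#   # -> [matrix([[ 3.5,  5.5],
#   #             [11.5, 13.5]])]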
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any:
'''simple docstring'''
lowercase_ : Tuple = []
for i in range(len(__UpperCamelCase ) ):
lowercase_ : Optional[Any] = np.shape(data[i] )
lowercase_ : List[str] = data[i].reshape(1 ,shapes[0] * shapes[1] )
lowercase_ : List[str] = data_listed.getA().tolist()[0]
data_expanded.extend(__UpperCamelCase )
lowercase_ : int = np.asarray(__UpperCamelCase )
return data_expanded
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
lowercase_ : Any = np.asarray(__UpperCamelCase )
lowercase_ : Any = np.shape(__UpperCamelCase )
lowercase_ : Optional[Any] = data_mat.reshape(1 ,shapes[0] * shapes[1] )
return data_expanded
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str:
'''simple docstring'''
lowercase_ : Any = []
lowercase_ : List[Any] = 0
for i_map in range(__UpperCamelCase ):
lowercase_ : List[str] = np.ones((size_map, size_map) )
for i in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
for j in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : List[Any] = pd_pool[
i_pool
]
lowercase_ : Any = i_pool + 1
lowercase_ : Optional[int] = np.multiply(
__UpperCamelCase ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) )
pd_all.append(__UpperCamelCase )
return pd_all
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=bool ) -> Optional[int]:
'''simple docstring'''
print('----------------------Start Training-------------------------' )
print((' - - Shape: Train_Data ', np.shape(__UpperCamelCase )) )
print((' - - Shape: Teach_Data ', np.shape(__UpperCamelCase )) )
lowercase_ : int = 0
lowercase_ : Tuple = []
lowercase_ : Tuple = 1_0000
while rp < n_repeat and mse >= error_accuracy:
lowercase_ : List[str] = 0
print(f'''-------------Learning Time {rp}--------------''' )
for p in range(len(__UpperCamelCase ) ):
# print('------------Learning Image: %d--------------'%p)
lowercase_ : int = np.asmatrix(datas_train[p] )
lowercase_ : Any = np.asarray(datas_teach[p] )
lowercase_ , lowercase_ : Tuple = self.convolute(
__UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
lowercase_ : Any = self.pooling(__UpperCamelCase ,self.size_poolinga )
lowercase_ : Optional[int] = np.shape(__UpperCamelCase )
lowercase_ : Optional[int] = self._expand(__UpperCamelCase )
lowercase_ : int = data_bp_input
lowercase_ : Tuple = np.dot(__UpperCamelCase ,self.vji.T ) - self.thre_bpa
lowercase_ : Dict = self.sig(__UpperCamelCase )
lowercase_ : int = np.dot(__UpperCamelCase ,self.wkj.T ) - self.thre_bpa
lowercase_ : int = self.sig(__UpperCamelCase )
# --------------Model Learning ------------------------
# calculate error and gradient---------------
lowercase_ : str = np.multiply(
(data_teach - bp_outa) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) )
lowercase_ : Optional[int] = np.multiply(
np.dot(__UpperCamelCase ,self.wkj ) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) )
lowercase_ : Any = np.dot(__UpperCamelCase ,self.vji )
lowercase_ : str = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowercase_ : Dict = pd_conva_pooled.T.getA().tolist()
lowercase_ : List[Any] = self._calculate_gradient_from_pool(
__UpperCamelCase ,__UpperCamelCase ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,)
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
lowercase_ : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] )
lowercase_ : Dict = self.rate_weight * np.dot(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : List[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
lowercase_ : Dict = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
lowercase_ : Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowercase_ : Any = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowercase_ : str = self.thre_bpa - pd_k_all * self.rate_thre
lowercase_ : Any = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
lowercase_ : List[Any] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowercase_ : int = rp + 1
lowercase_ : Union[str, Any] = error_count / patterns
all_mse.append(__UpperCamelCase )
def draw_error():
lowercase_ : str = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(__UpperCamelCase ,'+-' )
plt.plot(__UpperCamelCase ,'r--' )
plt.xlabel('Learning Times' )
plt.ylabel('All_mse' )
plt.grid(__UpperCamelCase ,alpha=0.5 )
plt.show()
print('------------------Training Completed---------------------' )
print((' - - Training epoch: ', rp, f''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Union[str, Any] = []
print('-------------------Start Testing-------------------------' )
print((' - - Shape: Test_Data ', np.shape(__UpperCamelCase )) )
for p in range(len(__UpperCamelCase ) ):
lowercase_ : List[Any] = np.asmatrix(datas_test[p] )
lowercase_ , lowercase_ : Optional[Any] = self.convolute(
__UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
lowercase_ : List[Any] = self.pooling(__UpperCamelCase ,self.size_poolinga )
lowercase_ : List[str] = self._expand(__UpperCamelCase )
lowercase_ : Any = data_bp_input
lowercase_ : Optional[Any] = bp_outa * self.vji.T - self.thre_bpa
lowercase_ : str = self.sig(__UpperCamelCase )
lowercase_ : List[str] = bp_outa * self.wkj.T - self.thre_bpa
lowercase_ : Optional[int] = self.sig(__UpperCamelCase )
produce_out.extend(bp_outa.getA().tolist() )
lowercase_ : List[str] = [list(map(self.do_round ,__UpperCamelCase ) ) for each in produce_out]
return np.asarray(__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase )
lowercase_ , lowercase_ : Union[str, Any] = self.convolute(
__UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
lowercase_ : Optional[int] = self.pooling(__UpperCamelCase ,self.size_poolinga )
return data_conveda, data_pooleda
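# A hypothetical end-to-end sketch of this class (upstream name: CNN; the
# positional arguments are the conv spec [kernel_size, n_kernels, step], the
# pooling size, the three BP layer widths, and the two learning rates):
#
#   cnn = CNN([3, 2, 1], 2, 4, 20, 10, rate_w=0.2, rate_t=0.2)
#   cnn.train(patterns, train_x, train_y, n_repeat=100, error_accuracy=0.1, draw_e=True)
#   predictions = cnn.predict(test_x)
#   cnn.save_model("model.pkl")
#
# Method names (train/predict/save_model) follow the upstream TheAlgorithms
# source and are assumptions here, since the obfuscation discarded them.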
if __name__ == "__main__":
pass
| 321
| 1
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCamelCase ( lowercase_ ):
def __init__( self ,__UpperCamelCase ,__UpperCamelCase ) -> int:
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
lowercase_ : Dict = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=__UpperCamelCase ,scheduler=__UpperCamelCase )
@torch.no_grad()
def __call__( self ,__UpperCamelCase = 1 ,__UpperCamelCase = None ,__UpperCamelCase = 0.0 ,__UpperCamelCase = 50 ,__UpperCamelCase = None ,__UpperCamelCase = "pil" ,__UpperCamelCase = True ,) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
if isinstance(self.unet.config.sample_size ,__UpperCamelCase ):
lowercase_ : int = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
lowercase_ : Optional[Any] = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(__UpperCamelCase ,__UpperCamelCase ) and len(__UpperCamelCase ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(__UpperCamelCase )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowercase_ : Any = randn_tensor(__UpperCamelCase ,generator=__UpperCamelCase ,device=self.device ,dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__UpperCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowercase_ : int = self.unet(__UpperCamelCase ,__UpperCamelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowercase_ : Tuple = self.scheduler.step(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,eta=__UpperCamelCase ,use_clipped_model_output=__UpperCamelCase ,generator=__UpperCamelCase ).prev_sample
lowercase_ : str = (image / 2 + 0.5).clamp(0 ,1 )
lowercase_ : Optional[Any] = image.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
lowercase_ : List[Any] = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCamelCase )
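# A usage sketch for this pipeline (assumption: it is the diffusers
# DDIMPipeline, so an unconditional checkpoint from the Hub fits here):
#
#   from diffusers import DDIMPipeline
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
#   image.save("sample.png")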
| 321
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ):
lowercase_ : Dict = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(__UpperCamelCase )
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : int = 'sshleifer/tiny-gpt2'
lowercase_ : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[str] = 'sgugger/tiny-distilbert-classification'
lowercase_ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,only_pretrain_model=__UpperCamelCase ,)
lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Any = 'sshleifer/tiny-gpt2'
lowercase_ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : Optional[Any] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Dict = 'sshleifer/tiny-gpt2'
lowercase_ : Tuple = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] )
lowercase_ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Any = 'sshleifer/tiny-gpt2'
lowercase_ : Any = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ,[config] )
lowercase_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : int = 'sshleifer/tiny-gpt2'
lowercase_ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : List[str] = 'sshleifer/tiny-gpt2'
lowercase_ : Optional[int] = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] )
lowercase_ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : str = 'patrickvonplaten/t5-tiny-random'
lowercase_ : int = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ,configs=[config] )
lowercase_ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 ,'Cannot do xla on CPU.' )
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Optional[int] = 'sshleifer/tiny-gpt2'
lowercase_ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : List[str] = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=__UpperCamelCase ,save_to_csv=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(__UpperCamelCase ,'inf_time.csv' ) ,inference_memory_csv_file=os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ,env_info_csv_file=os.path.join(__UpperCamelCase ,'env.csv' ) ,multi_process=__UpperCamelCase ,)
lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'env.csv' ) ).exists() )
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : int = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(__UpperCamelCase ):
self.assertTrue(hasattr(__UpperCamelCase ,'sequential' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'cumulative' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'current' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(__UpperCamelCase ,'log.txt' ) ,log_print=__UpperCamelCase ,trace_memory_line_by_line=__UpperCamelCase ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : Dict = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Any = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'log.txt' ) ).exists() )
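# A minimal standalone sketch of the API exercised by these tests (assumption:
# run outside the test harness, with TensorFlow installed):
#
#   args = TensorFlowBenchmarkArguments(
#       models=["sshleifer/tiny-gpt2"], inference=True, training=False,
#       sequence_lengths=[8], batch_sizes=[1], multi_process=False)
#   results = TensorFlowBenchmark(args).run()
#   print(results.time_inference_result)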
| 321
| 1
|
"""simple docstring"""
def lowercase__( __SCREAMING_SNAKE_CASE : int ):
lowercase_ : Union[str, Any] = abs(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = 0
while n > 0:
res += n % 10
n //= 10
return res
def lowercase__( __SCREAMING_SNAKE_CASE : int ):
lowercase_ : Optional[Any] = abs(__SCREAMING_SNAKE_CASE )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def lowercase__( __SCREAMING_SNAKE_CASE : int ):
return sum(int(__SCREAMING_SNAKE_CASE ) for c in str(abs(__SCREAMING_SNAKE_CASE ) ) )
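# All three variants agree on any integer; with the upstream names
# (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
#
#   sum_of_digits(12345)             # -> 15
#   sum_of_digits_recursion(-12345)  # -> 15
#   sum_of_digits_compact(0)         # -> 0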
def lowercase__( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(__SCREAMING_SNAKE_CASE : Callable , __SCREAMING_SNAKE_CASE : int ) -> None:
lowercase_ : int = F'''{func.__name__}({value})'''
lowercase_ : Any = timeit(F'''__main__.{call}''' , setup='import __main__' )
print(F'''{call:56} = {func(__SCREAMING_SNAKE_CASE )} -- {timing:.4f} seconds''' )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 321
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
class UpperCamelCase ( lowercase_ ):
lowercase = ['input_values', 'padding_mask']
def __init__( self ,__UpperCamelCase = 1 ,__UpperCamelCase = 2_4000 ,__UpperCamelCase = 0.0 ,__UpperCamelCase = None ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> Any:
'''simple docstring'''
super().__init__(feature_size=__UpperCamelCase ,sampling_rate=__UpperCamelCase ,padding_value=__UpperCamelCase ,**__UpperCamelCase )
lowercase_ : List[str] = chunk_length_s
lowercase_ : Tuple = overlap
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = False ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if padding and truncation:
raise ValueError('Both padding and truncation were set. Make sure you only set one.' )
elif padding is None:
# by default let's pad the inputs
lowercase_ : Optional[int] = True
lowercase_ : Optional[int] = bool(
isinstance(__UpperCamelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) )
if is_batched:
lowercase_ : int = [np.asarray(__UpperCamelCase ,dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(__UpperCamelCase ,np.ndarray ):
lowercase_ : Any = np.asarray(__UpperCamelCase ,dtype=np.floataa )
elif isinstance(__UpperCamelCase ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
lowercase_ : List[str] = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
lowercase_ : Dict = [np.asarray(__UpperCamelCase ).T]
# verify inputs are valid
for idx, example in enumerate(__UpperCamelCase ):
if example.ndim > 2:
raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' )
lowercase_ : Optional[int] = None
lowercase_ : List[Any] = BatchFeature({'input_values': raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
lowercase_ : List[Any] = min(array.shape[0] for array in raw_audio )
lowercase_ : int = int(np.floor(max_length / self.chunk_stride ) )
lowercase_ : Dict = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
lowercase_ : List[Any] = max(array.shape[0] for array in raw_audio )
lowercase_ : Tuple = int(np.ceil(max_length / self.chunk_stride ) )
lowercase_ : List[str] = (nb_step - 1) * self.chunk_stride + self.chunk_length
lowercase_ : Union[str, Any] = 'max_length'
else:
lowercase_ : int = input_values
# normal padding on batch
if padded_inputs is None:
lowercase_ : int = self.pad(
__UpperCamelCase ,max_length=__UpperCamelCase ,truncation=__UpperCamelCase ,padding=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,)
if padding:
lowercase_ : Optional[int] = padded_inputs.pop('attention_mask' )
lowercase_ : Dict = []
for example in padded_inputs.pop('input_values' ):
if self.feature_size == 1:
lowercase_ : Optional[int] = example[..., None]
input_values.append(example.T )
lowercase_ : str = input_values
if return_tensors is not None:
lowercase_ : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase )
return padded_inputs
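# A hypothetical call into this extractor (assumption: it is the transformers
# EncodecFeatureExtractor, configured for a mono 24 kHz model):
#
#   import numpy as np
#   fe = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000)
#   audio = np.random.randn(24000).astype(np.float32)   # 1 s of mono audio
#   inputs = fe(audio, sampling_rate=24000, return_tensors="np")
#   inputs["input_values"].shape   # -> (1, 1, 24000)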
| 321
| 1
|
"""simple docstring"""
import os
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase_ : Optional[int] = len(grid[0] )
lowercase_ : int = len(__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = 0
lowercase_ : Optional[Any] = 0
lowercase_ : int = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(__SCREAMING_SNAKE_CASE ):
for j in range(n_rows - 3 ):
lowercase_ : Dict = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
lowercase_ : int = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
lowercase_ : Optional[int] = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
lowercase_ : List[Any] = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
lowercase_ : Any = max(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if max_product > largest:
lowercase_ : Optional[int] = max_product
return largest
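# A tiny sanity check for the scan above (assumption: the function is
# largest_product(grid), Project Euler problem 11): on a uniform 4x4 grid
# every run of four multiplies to the same value.
#
#   largest_product([[2] * 4 for _ in range(4)])   # -> 2 ** 4 == 16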
def lowercase__( ):
lowercase_ : int = []
with open(os.path.dirname(__SCREAMING_SNAKE_CASE ) + '/grid.txt' ) as file:
for line in file:
grid.append(line.strip('\n' ).split(' ' ) )
lowercase_ : Union[str, Any] = [[int(__SCREAMING_SNAKE_CASE ) for i in grid[j]] for j in range(len(__SCREAMING_SNAKE_CASE ) )]
return largest_product(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(solution())
| 321
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__SCREAMING_SNAKE_CASE ={"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE =[
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure)
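# The _LazyModule indirection above keeps `import transformers` cheap: the
# torch-dependent modeling file is only imported on first attribute access,
# e.g.
#
#   from transformers import MraConfig   # immediate, config only
#   from transformers import MraModel    # triggers the modeling_mra import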
| 321
| 1
|
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE =OrderedDict(
[
("align", "EfficientNetImageProcessor"),
("beit", "BeitImageProcessor"),
("bit", "BitImageProcessor"),
("blip", "BlipImageProcessor"),
("blip-2", "BlipImageProcessor"),
("bridgetower", "BridgeTowerImageProcessor"),
("chinese_clip", "ChineseCLIPImageProcessor"),
("clip", "CLIPImageProcessor"),
("clipseg", "ViTImageProcessor"),
("conditional_detr", "ConditionalDetrImageProcessor"),
("convnext", "ConvNextImageProcessor"),
("convnextv2", "ConvNextImageProcessor"),
("cvt", "ConvNextImageProcessor"),
("data2vec-vision", "BeitImageProcessor"),
("deformable_detr", "DeformableDetrImageProcessor"),
("deit", "DeiTImageProcessor"),
("deta", "DetaImageProcessor"),
("detr", "DetrImageProcessor"),
("dinat", "ViTImageProcessor"),
("donut-swin", "DonutImageProcessor"),
("dpt", "DPTImageProcessor"),
("efficientformer", "EfficientFormerImageProcessor"),
("efficientnet", "EfficientNetImageProcessor"),
("flava", "FlavaImageProcessor"),
("focalnet", "BitImageProcessor"),
("git", "CLIPImageProcessor"),
("glpn", "GLPNImageProcessor"),
("groupvit", "CLIPImageProcessor"),
("imagegpt", "ImageGPTImageProcessor"),
("instructblip", "BlipImageProcessor"),
("layoutlmv2", "LayoutLMv2ImageProcessor"),
("layoutlmv3", "LayoutLMv3ImageProcessor"),
("levit", "LevitImageProcessor"),
("mask2former", "Mask2FormerImageProcessor"),
("maskformer", "MaskFormerImageProcessor"),
("mgp-str", "ViTImageProcessor"),
("mobilenet_v1", "MobileNetV1ImageProcessor"),
("mobilenet_v2", "MobileNetV2ImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevitv2", "MobileViTImageProcessor"),
("nat", "ViTImageProcessor"),
("oneformer", "OneFormerImageProcessor"),
("owlvit", "OwlViTImageProcessor"),
("perceiver", "PerceiverImageProcessor"),
("pix2struct", "Pix2StructImageProcessor"),
("poolformer", "PoolFormerImageProcessor"),
("regnet", "ConvNextImageProcessor"),
("resnet", "ConvNextImageProcessor"),
("sam", "SamImageProcessor"),
("segformer", "SegformerImageProcessor"),
("swiftformer", "ViTImageProcessor"),
("swin", "ViTImageProcessor"),
("swin2sr", "Swin2SRImageProcessor"),
("swinv2", "ViTImageProcessor"),
("table-transformer", "DetrImageProcessor"),
("timesformer", "VideoMAEImageProcessor"),
("tvlt", "TvltImageProcessor"),
("upernet", "SegformerImageProcessor"),
("van", "ConvNextImageProcessor"),
("videomae", "VideoMAEImageProcessor"),
("vilt", "ViltImageProcessor"),
("vit", "ViTImageProcessor"),
("vit_hybrid", "ViTHybridImageProcessor"),
("vit_mae", "ViTImageProcessor"),
("vit_msn", "ViTImageProcessor"),
("xclip", "CLIPImageProcessor"),
("yolos", "YolosImageProcessor"),
]
)
__SCREAMING_SNAKE_CASE =_LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
lowercase_ : Dict = model_type_to_module_name(__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = importlib.import_module(F'''.{module_name}''' , 'transformers.models' )
try:
return getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(__SCREAMING_SNAKE_CASE , '__name__' , __SCREAMING_SNAKE_CASE ) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
lowercase_ : Tuple = importlib.import_module('transformers' )
if hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return None
def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , __SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[Dict[str, str]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[bool, str]] = None , __SCREAMING_SNAKE_CASE : Optional[str] = None , __SCREAMING_SNAKE_CASE : bool = False , **__SCREAMING_SNAKE_CASE : Tuple , ):
lowercase_ : List[str] = get_file_from_repo(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , force_download=__SCREAMING_SNAKE_CASE , resume_download=__SCREAMING_SNAKE_CASE , proxies=__SCREAMING_SNAKE_CASE , use_auth_token=__SCREAMING_SNAKE_CASE , revision=__SCREAMING_SNAKE_CASE , local_files_only=__SCREAMING_SNAKE_CASE , )
if resolved_config_file is None:
logger.info(
'Could not locate the image processor configuration file, will try to use the model config instead.' )
return {}
with open(__SCREAMING_SNAKE_CASE , encoding='utf-8' ) as reader:
return json.load(__SCREAMING_SNAKE_CASE )
class UpperCamelCase :
def __init__( self ) -> Optional[Any]:
'''simple docstring'''
raise EnvironmentError(
'AutoImageProcessor is designed to be instantiated '
'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(__UpperCamelCase )
def _UpperCAmelCase ( cls ,__UpperCamelCase ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
lowercase_ : Union[str, Any] = kwargs.pop('config' ,__UpperCamelCase )
lowercase_ : Optional[Any] = kwargs.pop('trust_remote_code' ,__UpperCamelCase )
lowercase_ : List[Any] = True
lowercase_ , lowercase_ : str = ImageProcessingMixin.get_image_processor_dict(__UpperCamelCase ,**__UpperCamelCase )
lowercase_ : Union[str, Any] = config_dict.get('image_processor_type' ,__UpperCamelCase )
lowercase_ : int = None
if "AutoImageProcessor" in config_dict.get('auto_map' ,{} ):
lowercase_ : List[Any] = config_dict['auto_map']['AutoImageProcessor']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
lowercase_ : Dict = config_dict.pop('feature_extractor_type' ,__UpperCamelCase )
if feature_extractor_class is not None:
logger.warning(
'Could not find image processor class in the image processor config or the model config. Loading'
' based on pattern matching with the model\'s feature extractor configuration.' )
lowercase_ : Optional[int] = feature_extractor_class.replace('FeatureExtractor' ,'ImageProcessor' )
if "AutoFeatureExtractor" in config_dict.get('auto_map' ,{} ):
lowercase_ : List[str] = config_dict['auto_map']['AutoFeatureExtractor']
lowercase_ : Dict = feature_extractor_auto_map.replace('FeatureExtractor' ,'ImageProcessor' )
logger.warning(
'Could not find image processor auto map in the image processor config or the model config.'
' Loading based on pattern matching with the model\'s feature extractor configuration.' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : Any = AutoConfig.from_pretrained(__UpperCamelCase ,**__UpperCamelCase )
# It could be in `config.image_processor_type``
lowercase_ : List[Any] = getattr(__UpperCamelCase ,'image_processor_type' ,__UpperCamelCase )
if hasattr(__UpperCamelCase ,'auto_map' ) and "AutoImageProcessor" in config.auto_map:
lowercase_ : Optional[Any] = config.auto_map['AutoImageProcessor']
if image_processor_class is not None:
lowercase_ : str = image_processor_class_from_name(__UpperCamelCase )
lowercase_ : Optional[int] = image_processor_auto_map is not None
lowercase_ : str = image_processor_class is not None or type(__UpperCamelCase ) in IMAGE_PROCESSOR_MAPPING
lowercase_ : int = resolve_trust_remote_code(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
if has_remote_code and trust_remote_code:
lowercase_ : List[Any] = get_class_from_dynamic_module(
__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase )
lowercase_ : List[str] = kwargs.pop('code_revision' ,__UpperCamelCase )
if os.path.isdir(__UpperCamelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(__UpperCamelCase ,**__UpperCamelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(__UpperCamelCase ,**__UpperCamelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(__UpperCamelCase ) in IMAGE_PROCESSOR_MAPPING:
lowercase_ : Dict = IMAGE_PROCESSOR_MAPPING[type(__UpperCamelCase )]
return image_processor_class.from_dict(__UpperCamelCase ,**__UpperCamelCase )
raise ValueError(
f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
f'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def _UpperCAmelCase ( __UpperCamelCase ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
IMAGE_PROCESSOR_MAPPING.register(__UpperCamelCase ,__UpperCamelCase )
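# Typical usage of the class above (this mirrors the real transformers
# AutoImageProcessor API):
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   inputs = processor(images=image, return_tensors="pt")   # image: a PIL.Image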
| 321
|
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__SCREAMING_SNAKE_CASE ="python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=None ):
require_version(deps[pkg] , __SCREAMING_SNAKE_CASE )
| 321
| 1
|
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 321
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Dict=False ):
lowercase_ : int = 'backbone.' if is_semantic else ''
lowercase_ : List[str] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(F'''{prefix}cls_token''', 'beit.embeddings.cls_token'),
(F'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'),
(F'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'),
(F'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('mask_token', 'beit.embeddings.mask_token'),
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('fc_norm.weight', 'beit.pooler.layernorm.weight'),
('fc_norm.bias', 'beit.pooler.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : List[Any]=False ):
for i in range(config.num_hidden_layers ):
lowercase_ : Any = 'backbone.' if is_semantic else ''
# queries, keys and values
lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.qkv.weight''' )
lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.q_bias''' )
lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.attn.v_bias''' )
lowercase_ : List[str] = in_proj_weight[
: config.hidden_size, :
]
lowercase_ : List[str] = q_bias
lowercase_ : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase_ : Any = in_proj_weight[
-config.hidden_size :, :
]
lowercase_ : Any = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
lowercase_ : Any = state_dict.pop(F'''{prefix}blocks.{i}.gamma_1''' )
lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.gamma_2''' )
lowercase_ : Tuple = gamma_a
lowercase_ : List[Any] = gamma_a
def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ):
lowercase_ : List[Any] = dct.pop(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = val
def lowercase__( ):
lowercase_ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowercase_ : Any = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any=False ):
lowercase_ : List[str] = False if 'rvlcdip' in checkpoint_url else True
lowercase_ : Dict = BeitConfig(use_absolute_position_embeddings=__SCREAMING_SNAKE_CASE , use_mask_token=__SCREAMING_SNAKE_CASE )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
lowercase_ : Any = 10_24
lowercase_ : List[str] = 40_96
lowercase_ : Tuple = 24
lowercase_ : Union[str, Any] = 16
# labels
if "rvlcdip" in checkpoint_url:
lowercase_ : Optional[Any] = 16
lowercase_ : Any = 'huggingface/label-files'
lowercase_ : int = 'rvlcdip-id2label.json'
lowercase_ : Optional[int] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
lowercase_ : Dict = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowercase_ : str = idalabel
lowercase_ : str = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
lowercase_ : Dict = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location='cpu' )['model']
lowercase_ : Optional[Any] = create_rename_keys(__SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
read_in_q_k_v(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE )
# load HuggingFace model
lowercase_ : Optional[int] = BeitForMaskedImageModeling(__SCREAMING_SNAKE_CASE ) if has_lm_head else BeitForImageClassification(__SCREAMING_SNAKE_CASE )
model.eval()
model.load_state_dict(__SCREAMING_SNAKE_CASE )
# Check outputs on an image
lowercase_ : List[Any] = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__SCREAMING_SNAKE_CASE )
lowercase_ : str = prepare_img()
lowercase_ : Optional[Any] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' )
lowercase_ : int = encoding['pixel_values']
lowercase_ : Any = model(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = outputs.logits
# verify logits
lowercase_ : Optional[Any] = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 1_96, 81_92]
assert logits.shape == torch.Size(__SCREAMING_SNAKE_CASE ), "Shape of logits not as expected"
Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__SCREAMING_SNAKE_CASE )
if push_to_hub:
if has_lm_head:
lowercase_ : List[str] = 'dit-base' if 'base' in checkpoint_url else 'dit-large'
else:
lowercase_ : List[str] = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
image_processor.push_to_hub(
repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=__SCREAMING_SNAKE_CASE , )
model.push_to_hub(
repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=__SCREAMING_SNAKE_CASE , )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
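# A hypothetical invocation of this conversion script (the file name is an
# assumption; the checkpoint URL is the argparse default above):
#
#   python convert_dit_unilm_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base \
#       --push_to_hub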
| 321
| 1
|
"""simple docstring"""
from __future__ import annotations
def lowercase__( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , ):
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative in a semiconductor' )
elif hole_conc < 0:
raise ValueError('Hole concentration cannot be negative in a semiconductor' )
elif intrinsic_conc < 0:
raise ValueError(
'Intrinsic concentration cannot be negative in a semiconductor' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
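# The function solves the mass-action law n_i**2 = n * p for whichever of the
# three quantities is passed as 0. Two quick checks (keyword names follow the
# upstream carrier_concentration signature, an assumption here):
#
#   carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
#   # -> ('intrinsic_conc', 50.0)   since sqrt(25 * 100) = 50
#   carrier_concentration(electron_conc=0, hole_conc=20, intrinsic_conc=10)
#   # -> ('electron_conc', 5.0)     since 10**2 / 20 = 5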
if __name__ == "__main__":
import doctest
doctest.testmod()
| 321
|
"""simple docstring"""
__SCREAMING_SNAKE_CASE ={
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
__SCREAMING_SNAKE_CASE ={value: key for key, value in encode_dict.items()}
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
lowercase_ : Union[str, Any] = ''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('encode() accepts only letters of the alphabet and spaces' )
return encoded
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
if set(__SCREAMING_SNAKE_CASE ) - {"A", "B", " "} != set():
raise Exception('decode() accepts only \'A\', \'B\' and spaces' )
lowercase_ : Dict = ''
for word in coded.split():
while len(__SCREAMING_SNAKE_CASE ) != 0:
decoded += decode_dict[word[:5]]
lowercase_ : Any = word[5:]
decoded += " "
return decoded.strip()
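# Round-trip example for the Baconian cipher above (assumption: the two
# functions are named encode and decode, as upstream):
#
#   encode("hello")
#   # -> 'AABBBAABAAABABAABABAABBAB'
#   decode("AABBBAABAAABABAABABAABBAB")
#   # -> 'hello'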
if __name__ == "__main__":
from doctest import testmod
testmod()
| 321
| 1
|
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k
def _set(k, v):
    return setitem, k, v
def _del(k):
    return delitem, k
def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set("key_a", "val_a"),
_set("key_b", "val_b"),
)
_overwrite_items = [
_set("key_a", "val_a"),
_set("key_a", "val_b"),
]
_delete_items = [
_set("key_a", "val_a"),
_set("key_b", "val_b"),
_del("key_a"),
_del("key_b"),
_set("key_a", "val_a"),
_del("key_a"),
]
_access_absent_items = [
_get("key_a"),
_del("key_a"),
_set("key_a", "val_a"),
_del("key_a"),
_del("key_a"),
_get("key_a"),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("key_a", "val_b"),
]
@pytest.mark.parametrize(
'operations' , (
pytest.param(_add_items , id='add items' ),
pytest.param(_overwrite_items , id='overwrite items' ),
pytest.param(_delete_items , id='delete items' ),
pytest.param(_access_absent_items , id='access absent items' ),
pytest.param(_add_with_resize_up , id='add with resize up' ),
pytest.param(_add_with_resize_down , id='add with resize down' ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods():
    def is_public(name: str) -> bool:
        return not name.startswith("_")
    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}
    assert dict_public_names > hash_public_names
"""simple docstring"""
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)
    return count_of_possible_combinations(target)
def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array)
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
n = 3
target = 5
array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
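# Worked check (added): with array = [1, 2, 5] and target = 5, the bottom-up table is
# dp = [1, 1, 2, 3, 5, 9], since dp[i] sums dp[i - item] over the items (order matters),
# so the script above prints 9.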
"""simple docstring"""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def ffmpeg_microphone(sampling_rate: int, chunk_length_s: float, format_for_conversion: str = "f32le"):
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"
    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(sampling_rate: int, chunk_length_s: float, stream_chunk_s: Optional[int] = None, stride_length_s: Optional[Union[Tuple[float, float], float]] = None, format_for_conversion: str = "f32le"):
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}")
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
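# Usage sketch (added; assumes ffmpeg is installed and a local "sample.wav" file exists):
#
#     with open("sample.wav", "rb") as f:
#         audio = ffmpeg_read(f.read(), sampling_rate=16000)
#     # `audio` is a mono float32 numpy array resampled to 16 kHz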
"""simple docstring"""
class DisjointSet:
    def __init__(self, set_counts) -> None:
        '''simple docstring'''
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))
    def merge(self, src: int, dst: int) -> bool:
        '''simple docstring'''
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True
    def get_parent(self, disj_set: int) -> int:
        '''simple docstring'''
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
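# Usage sketch (added): three singleton sets of size 1; merging 0-1 and then 1-2 leaves a
# single set of size 3, which is tracked in max_set.
#     ds = DisjointSet([1, 1, 1])
#     ds.merge(0, 1)  # True
#     ds.merge(1, 2)  # True
#     ds.max_set      # 3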
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
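# Example (added; the repository and file names are illustrative):
#     hf_hub_url("user/my_dataset", "data/train.parquet", revision="main")
#     -> "https://huggingface.co/datasets/user/my_dataset/resolve/main/data/train.parquet"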
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        '''simple docstring'''
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        '''simple docstring'''
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)
    @mask_token.setter
    def mask_token(self, value):
        '''simple docstring'''
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        '''simple docstring'''
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)
        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
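# Usage sketch (added; downloads the checkpoint on first use):
#     tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#     tokenizer("Hello world")["input_ids"]  # build_inputs_with_special_tokens appends EOS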
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    def test_input_types(self):
        '''simple docstring'''
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
    def test_check_illegal_input(self):
        '''simple docstring'''
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here
    def test_example_progression(self):
        '''simple docstring'''
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)
        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])
    def test_example_progression_unequal_three_mid_and_reset(self):
        '''simple docstring'''
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)
        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])
        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])
        dc.reset()
        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        '''simple docstring'''
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {'BertModelTest': 'BertModelTester'}
        EXPECTED_BLIP_MAPPING = {
            'BlipModelTest': 'BlipModelTester',
            'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
            'BlipTextModelTest': 'BlipTextModelTester',
            'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
            'BlipVQAModelTest': 'BlipVQAModelTester',
            'BlipVisionModelTest': 'BlipVisionModelTester',
        }
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_test_mapping(self):
        '''simple docstring'''
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {
            'BertForMaskedLM': ['BertModelTest'],
            'BertForMultipleChoice': ['BertModelTest'],
            'BertForNextSentencePrediction': ['BertModelTest'],
            'BertForPreTraining': ['BertModelTest'],
            'BertForQuestionAnswering': ['BertModelTest'],
            'BertForSequenceClassification': ['BertModelTest'],
            'BertForTokenClassification': ['BertModelTest'],
            'BertLMHeadModel': ['BertModelTest'],
            'BertModel': ['BertModelTest'],
        }
        EXPECTED_BLIP_MAPPING = {
            'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
            'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
            'BlipForQuestionAnswering': ['BlipVQAModelTest'],
            'BlipModel': ['BlipModelTest'],
            'BlipTextModel': ['BlipTextModelTest'],
            'BlipVisionModel': ['BlipVisionModelTest'],
        }
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_tester_mapping(self):
        '''simple docstring'''
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {
            'BertForMaskedLM': ['BertModelTester'],
            'BertForMultipleChoice': ['BertModelTester'],
            'BertForNextSentencePrediction': ['BertModelTester'],
            'BertForPreTraining': ['BertModelTester'],
            'BertForQuestionAnswering': ['BertModelTester'],
            'BertForSequenceClassification': ['BertModelTester'],
            'BertForTokenClassification': ['BertModelTester'],
            'BertLMHeadModel': ['BertModelTester'],
            'BertModel': ['BertModelTester'],
        }
        EXPECTED_BLIP_MAPPING = {
            'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
            'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
            'BlipForQuestionAnswering': ['BlipVQAModelTester'],
            'BlipModel': ['BlipModelTester'],
            'BlipTextModel': ['BlipTextModelTester'],
            'BlipVisionModel': ['BlipVisionModelTester'],
        }
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        '''simple docstring'''
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_image_processor(self, **kwargs):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        '''simple docstring'''
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        '''simple docstring'''
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        '''simple docstring'''
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask', 'pixel_values'])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_visual_prompt(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'conditional_pixel_values'])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
"""simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    # solves the multi-process interleaved print problem by locking this file
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
rank = dist.get_rank()
world_size = dist.get_world_size()
printflock(F"{gpu} is OK (global rank: {rank}/{world_size})")
dist.barrier()
if rank == 0:
printflock(F"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")
except Exception:
printflock(F"{gpu} is broken")
raise
"""simple docstring"""
def decimal_to_binary(num: int) -> str:
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
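# Examples (added):
#     decimal_to_binary(0)    -> '0b0'
#     decimal_to_binary(40)   -> '0b101000'
#     decimal_to_binary(-40)  -> '-0b101000'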
"""simple docstring"""
class Node:
    def __init__(self, name, val):
        '''simple docstring'''
        self.name = name
        self.val = val
    def __str__(self):
        '''simple docstring'''
        return f"{self.__class__.__name__}({self.name}, {self.val})"
    def __lt__(self, other):
        '''simple docstring'''
        return self.val < other.val
class MinHeap:
    def __init__(self, array):
        '''simple docstring'''
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)
    def __getitem__(self, key):
        '''simple docstring'''
        return self.get_value(key)
    def get_parent_idx(self, idx):
        '''simple docstring'''
        return (idx - 1) // 2
    def get_left_child_idx(self, idx):
        '''simple docstring'''
        return idx * 2 + 1
    def get_right_child_idx(self, idx):
        '''simple docstring'''
        return idx * 2 + 2
    def get_value(self, key):
        '''simple docstring'''
        return self.heap_dict[key]
    def build_heap(self, array):
        '''simple docstring'''
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array
    def sift_down(self, idx, array):
        '''simple docstring'''
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break
    def sift_up(self, idx):
        '''simple docstring'''
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)
    def peek(self):
        '''simple docstring'''
        return self.heap[0]
    def remove(self):
        '''simple docstring'''
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x
    def insert(self, node):
        '''simple docstring'''
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)
    def is_empty(self):
        '''simple docstring'''
        return len(self.heap) == 0
    def decrease_key(self, node, new_value):
        '''simple docstring'''
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
print(i)
print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        '''simple docstring'''
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['bs'], model_result['ss']):
                result = model_result['result'][batch_size][sequence_length]
                self.assertIsNotNone(result)
    def test_inference_no_configs_eager(self):
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False)
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_no_configs_only_pretrain(self):
        '''simple docstring'''
        MODEL_ID = 'sgugger/tiny-distilbert-classification'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True)
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_no_configs_graph(self):
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_with_configs_eager(self):
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False)
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_with_configs_graph(self):
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_train_no_configs(self):
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_train_with_configs(self):
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_inference_encoder_decoder_with_configs(self):
        '''simple docstring'''
        MODEL_ID = 'patrickvonplaten/t5-tiny-random'
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU')) == 0, 'Cannot do xla on CPU.')
    def test_inference_no_configs_xla(self):
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False)
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_save_csv_files(self):
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, 'inf_time.csv'), inference_memory_csv_file=os.path.join(tmp_dir, 'inf_mem.csv'), env_info_csv_file=os.path.join(tmp_dir, 'env.csv'), multi_process=False)
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, 'inf_time.csv')).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, 'inf_mem.csv')).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, 'env.csv')).exists())
    def test_trace_memory(self):
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, 'sequential'))
            self.assertTrue(hasattr(summary, 'cumulative'))
            self.assertTrue(hasattr(summary, 'current'))
            self.assertTrue(hasattr(summary, 'total'))
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, 'log.txt'), log_print=True, trace_memory_line_by_line=True, eager_mode=True, multi_process=False)
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, 'log.txt')).exists())
| 321
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : List[Any] = tempfile.mkdtemp()
# fmt: off
lowercase_ : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
lowercase_ : int = dict(zip(__UpperCamelCase ,range(len(__UpperCamelCase ) ) ) )
lowercase_ : Union[str, Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
lowercase_ : Tuple = {'unk_token': '<unk>'}
lowercase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
lowercase_ : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(__UpperCamelCase ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(__UpperCamelCase ) )
lowercase_ : Any = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073],
'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
lowercase_ : List[str] = os.path.join(self.tmpdirname ,__UpperCamelCase )
with open(self.image_processor_file ,'w' ,encoding='utf-8' ) as fp:
json.dump(__UpperCamelCase ,__UpperCamelCase )
def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> str:
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname ,**__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Dict = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
lowercase_ : List[str] = [Image.fromarray(np.moveaxis(__UpperCamelCase ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : Optional[int] = self.get_tokenizer()
lowercase_ : List[Any] = self.get_rust_tokenizer()
lowercase_ : Tuple = self.get_image_processor()
lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowercase_ : Union[str, Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname ,use_fast=__UpperCamelCase )
lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowercase_ : str = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,__UpperCamelCase )
self.assertIsInstance(processor_fast.tokenizer ,__UpperCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,__UpperCamelCase )
self.assertIsInstance(processor_fast.image_processor ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase_ : List[Any] = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' )
lowercase_ : Any = self.get_image_processor(do_normalize=__UpperCamelCase ,padding_value=1.0 )
lowercase_ : Any = CLIPSegProcessor.from_pretrained(
self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=__UpperCamelCase ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,__UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : Dict = self.get_image_processor()
lowercase_ : List[str] = self.get_tokenizer()
lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : List[Any] = self.prepare_image_inputs()
lowercase_ : str = image_processor(__UpperCamelCase ,return_tensors='np' )
lowercase_ : Union[str, Any] = processor(images=__UpperCamelCase ,return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Dict = self.get_image_processor()
lowercase_ : List[Any] = self.get_tokenizer()
lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : Dict = 'lower newer'
lowercase_ : Any = processor(text=__UpperCamelCase )
lowercase_ : int = tokenizer(__UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : str = self.get_image_processor()
lowercase_ : str = self.get_tokenizer()
lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : List[Any] = 'lower newer'
lowercase_ : str = self.prepare_image_inputs()
lowercase_ : Optional[int] = processor(text=__UpperCamelCase ,images=__UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) ,['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(__UpperCamelCase ):
processor()
def test_visual_prompt( self ) -> Tuple:
    '''simple docstring'''
    image_processor = self.get_image_processor()
    tokenizer = self.get_tokenizer()
    processor = CLIPSegProcessor(tokenizer=tokenizer ,image_processor=image_processor )
    image_input = self.prepare_image_inputs()
    visual_prompt_input = self.prepare_image_inputs()
    inputs = processor(images=image_input ,visual_prompt=visual_prompt_input )
    self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'conditional_pixel_values'] )
    # test if it raises when no input is passed
    with pytest.raises(ValueError ):
        processor()
def test_tokenizer_decode( self ) -> Dict:
    '''simple docstring'''
    image_processor = self.get_image_processor()
    tokenizer = self.get_tokenizer()
    processor = CLIPSegProcessor(tokenizer=tokenizer ,image_processor=image_processor )
    predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
    decoded_processor = processor.batch_decode(predicted_ids )
    decoded_tok = tokenizer.batch_decode(predicted_ids )
    self.assertListEqual(decoded_tok ,decoded_processor )
| 321
| 1
|
"""simple docstring"""
def exchange_sort( numbers : list[int] ):
    n = len(numbers )
    for i in range(n ):
        for j in range(i + 1 ,n ):
            if numbers[j] < numbers[i]:
                numbers[i] , numbers[j] = numbers[j] , numbers[i]
    return numbers
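# Quick self-check (added for illustration; not part of the original module):
# the nested loops perform O(n^2) comparisons and sort the list in place.
assert exchange_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert exchange_sort([-2, 0, -1]) == [-2, -1, 0]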
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
| 321
|
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 321
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImgaImgPipelineFastTests( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionXLImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def get_dummy_components( self ) -> List[Any]:
    '''simple docstring'''
    torch.manual_seed(0 )
    unet = UNetaDConditionModel(
    block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,attention_head_dim=(2, 4) ,use_linear_projection=True ,addition_embed_type='text_time' ,addition_time_embed_dim=8 ,transformer_layers_per_block=(1, 2) ,projection_class_embeddings_input_dim=80 ,cross_attention_dim=64 ,)
    scheduler = EulerDiscreteScheduler(
    beta_start=0.00085 ,beta_end=0.012 ,steps_offset=1 ,beta_schedule='scaled_linear' ,timestep_spacing='leading' ,)
    torch.manual_seed(0 )
    vae = AutoencoderKL(
    block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,sample_size=128 ,)
    torch.manual_seed(0 )
    text_encoder_config = CLIPTextConfig(
    bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act='gelu' ,projection_dim=32 ,)
    text_encoder = CLIPTextModel(text_encoder_config )
    tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ,local_files_only=False )
    text_encoder_a = CLIPTextModelWithProjection(text_encoder_config )
    tokenizer_a = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ,local_files_only=False )
    components = {
        'unet': unet,
        'scheduler': scheduler,
        'vae': vae,
        'text_encoder': text_encoder,
        'tokenizer': tokenizer,
        'text_encoder_2': text_encoder_a,
        'tokenizer_2': tokenizer_a,
        # "safety_checker": None,
        # "feature_extractor": None,
    }
    return components
def get_dummy_inputs( self ,device ,seed=0 ) -> str:
    '''simple docstring'''
    image = floats_tensor((1, 3, 32, 32) ,rng=random.Random(seed ) ).to(device )
    image = image / 2 + 0.5
    if str(device ).startswith('mps' ):
        generator = torch.manual_seed(seed )
    else:
        generator = torch.Generator(device=device ).manual_seed(seed )
    inputs = {
        'prompt': 'A painting of a squirrel eating a burger',
        'image': image,
        'generator': generator,
        'num_inference_steps': 2,
        'guidance_scale': 5.0,
        'output_type': 'numpy',
        'strength': 0.75,
    }
    return inputs
def test_stable_diffusion_xl_img2img_euler( self ) -> Any:
    '''simple docstring'''
    device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
    components = self.get_dummy_components()
    sd_pipe = StableDiffusionXLImgaImgPipeline(**components )
    sd_pipe = sd_pipe.to(device )
    sd_pipe.set_progress_bar_config(disable=None )
    inputs = self.get_dummy_inputs(device )
    image = sd_pipe(**inputs ).images
    image_slice = image[0, -3:, -3:, -1]
    assert image.shape == (1, 32, 32, 3)
    expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] )
    assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def test_attention_slicing_forward_pass( self ) -> List[Any]:
    '''simple docstring'''
    super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def test_inference_batch_single_identical( self ) -> Tuple:
    '''simple docstring'''
    super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def test_save_load_optional_components( self ) -> List[Any]:
    '''simple docstring'''
    pass
def test_stable_diffusion_xl_img2img_negative_prompt_embeds( self ) -> Dict:
    '''simple docstring'''
    components = self.get_dummy_components()
    sd_pipe = StableDiffusionXLImgaImgPipeline(**components )
    sd_pipe = sd_pipe.to(torch_device )
    sd_pipe.set_progress_bar_config(disable=None )
    # forward without prompt embeds
    inputs = self.get_dummy_inputs(torch_device )
    negative_prompt = 3 * ['this is a negative prompt']
    inputs['negative_prompt'] = negative_prompt
    inputs['prompt'] = 3 * [inputs['prompt']]
    output = sd_pipe(**inputs )
    image_slice_1 = output.images[0, -3:, -3:, -1]
    # forward with prompt embeds
    inputs = self.get_dummy_inputs(torch_device )
    negative_prompt = 3 * ['this is a negative prompt']
    prompt = 3 * [inputs.pop('prompt' )]
    prompt_embeds , negative_prompt_embeds , pooled_prompt_embeds , negative_pooled_prompt_embeds = sd_pipe.encode_prompt(
    prompt ,negative_prompt=negative_prompt )
    output = sd_pipe(
    **inputs ,prompt_embeds=prompt_embeds ,negative_prompt_embeds=negative_prompt_embeds ,pooled_prompt_embeds=pooled_prompt_embeds ,negative_pooled_prompt_embeds=negative_pooled_prompt_embeds ,)
    image_slice_2 = output.images[0, -3:, -3:, -1]
    # make sure that it's equal
    assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionPipelineSlowTests( unittest.TestCase ):
def tearDown( self ) -> List[Any]:
    '''simple docstring'''
    super().tearDown()
    gc.collect()
    torch.cuda.empty_cache()
def get_inputs( self ,device ,generator_device="cpu" ,dtype=torch.float32 ,seed=0 ) -> Optional[Any]:
    '''simple docstring'''
    generator = torch.Generator(device=generator_device ).manual_seed(seed )
    latents = np.random.RandomState(seed ).standard_normal((1, 4, 64, 64) )
    latents = torch.from_numpy(latents ).to(device=device ,dtype=dtype )
    inputs = {
        'prompt': 'a photograph of an astronaut riding a horse',
        'latents': latents,
        'generator': generator,
        'num_inference_steps': 3,
        'guidance_scale': 7.5,
        'output_type': 'numpy',
    }
    return inputs
def test_stable_diffusion_base( self ) -> str:
    '''simple docstring'''
    pipe = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' )
    pipe.to(torch_device )
    pipe.set_progress_bar_config(disable=None )
    inputs = self.get_inputs(torch_device )
    image = pipe(**inputs ).images
    image_slice = image[0, -3:, -3:, -1].flatten()
    assert image.shape == (1, 512, 512, 3)
    expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506] )
    assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 321
|
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester :
def __init__( self ,parent ,batch_size=13 ,seq_length=7 ,is_training=True ,use_input_mask=True ,vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=50 ,initializer_range=0.02 ,use_labels=True ,scope=None ,) -> List[str]:
    '''simple docstring'''
    self.parent = parent
    self.batch_size = batch_size
    self.seq_length = seq_length
    self.is_training = is_training
    self.use_input_mask = use_input_mask
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.hidden_act = hidden_act
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.max_position_embeddings = max_position_embeddings
    self.initializer_range = initializer_range
    self.use_labels = use_labels
    self.scope = scope
def prepare_config_and_inputs( self ) -> str:
    '''simple docstring'''
    input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
    input_mask = None
    if self.use_input_mask:
        input_mask = random_attention_mask([self.batch_size, self.seq_length] )
    if self.use_labels:
        token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
    config = self.get_config()
return config, input_ids, input_mask, token_labels
def get_config( self ) -> Union[str, Any]:
    '''simple docstring'''
    return BertGenerationConfig(
    vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,is_decoder=False ,initializer_range=self.initializer_range ,)
def prepare_config_and_inputs_for_decoder( self ) -> Any:
    '''simple docstring'''
    config , input_ids , input_mask , token_labels = self.prepare_config_and_inputs()
    config.is_decoder = True
    encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
    encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def create_and_check_model( self ,config ,input_ids ,input_mask ,token_labels ,**kwargs ) -> Any:
    '''simple docstring'''
    model = BertGenerationEncoder(config=config )
    model.to(torch_device )
    model.eval()
    result = model(input_ids ,attention_mask=input_mask )
    result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_model_as_decoder( self ,config ,input_ids ,input_mask ,token_labels ,encoder_hidden_states ,encoder_attention_mask ,**kwargs ) -> Optional[Any]:
    '''simple docstring'''
    config.add_cross_attention = True
    model = BertGenerationEncoder(config=config )
    model.to(torch_device )
    model.eval()
    result = model(
    input_ids ,attention_mask=input_mask ,encoder_hidden_states=encoder_hidden_states ,encoder_attention_mask=encoder_attention_mask ,)
    result = model(
    input_ids ,attention_mask=input_mask ,encoder_hidden_states=encoder_hidden_states ,)
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_decoder_model_past_large_inputs( self ,config ,input_ids ,input_mask ,token_labels ,encoder_hidden_states ,encoder_attention_mask ,**kwargs ) -> int:
    '''simple docstring'''
    config.is_decoder = True
    config.add_cross_attention = True
    model = BertGenerationDecoder(config=config ).to(torch_device ).eval()
    # first forward pass
    outputs = model(
    input_ids ,attention_mask=input_mask ,encoder_hidden_states=encoder_hidden_states ,encoder_attention_mask=encoder_attention_mask ,use_cache=True ,)
    past_key_values = outputs.past_key_values
    # create hypothetical multiple next tokens and extend next_input_ids
    next_tokens = ids_tensor((self.batch_size, 3) ,config.vocab_size )
    next_mask = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
    # append to next input_ids and attention mask
    next_input_ids = torch.cat([input_ids, next_tokens] ,dim=-1 )
    next_attention_mask = torch.cat([input_mask, next_mask] ,dim=-1 )
    output_from_no_past = model(
    next_input_ids ,attention_mask=next_attention_mask ,encoder_hidden_states=encoder_hidden_states ,encoder_attention_mask=encoder_attention_mask ,output_hidden_states=True ,)['hidden_states'][0]
    output_from_past = model(
    next_tokens ,attention_mask=next_attention_mask ,encoder_hidden_states=encoder_hidden_states ,encoder_attention_mask=encoder_attention_mask ,past_key_values=past_key_values ,output_hidden_states=True ,)['hidden_states'][0]
    # select random slice
    random_slice_idx = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
    output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
    output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
    self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
    # test that outputs are equal for slice
    self.parent.assertTrue(torch.allclose(output_from_no_past_slice ,output_from_past_slice ,atol=1e-3 ) )
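    # Added illustration of the property asserted above (not part of the original test):
    # with use_cache=True the decoder reuses past_key_values, so feeding only the k newly
    # appended tokens together with the cache must reproduce the last k positions of a
    # full forward pass over the concatenated sequence, up to atol=1e-3.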
def create_and_check_for_causal_lm( self ,config ,input_ids ,input_mask ,token_labels ,*args ,) -> Union[str, Any]:
    '''simple docstring'''
    model = BertGenerationDecoder(config )
    model.to(torch_device )
    model.eval()
    result = model(input_ids ,attention_mask=input_mask ,labels=token_labels )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def prepare_config_and_inputs_for_common( self ) -> Optional[Any]:
    '''simple docstring'''
    config , input_ids , input_mask , token_labels = self.prepare_config_and_inputs()
    inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class BertGenerationEncoderTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder}
        if is_torch_available()
        else {}
    )
def setUp( self ) -> Tuple:
    '''simple docstring'''
    self.model_tester = BertGenerationEncoderTester(self )
    self.config_tester = ConfigTester(self ,config_class=BertGenerationConfig ,hidden_size=37 )
def test_config( self ) -> Optional[Any]:
    '''simple docstring'''
    self.config_tester.run_common_tests()
def test_model( self ) -> List[str]:
    '''simple docstring'''
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_model(*config_and_inputs )
def test_model_as_bert( self ) -> List[str]:
    '''simple docstring'''
    config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs()
    config.model_type = 'bert'
    self.model_tester.create_and_check_model(config ,input_ids ,input_mask ,token_labels )
def test_model_as_decoder( self ) -> Optional[int]:
    '''simple docstring'''
    config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
    self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )
def test_decoder_model_past_with_large_inputs( self ) -> Dict:
    '''simple docstring'''
    config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
    self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs )
def test_model_as_decoder_with_default_input_mask( self ) -> List[str]:
    '''simple docstring'''
    config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask = self.model_tester.prepare_config_and_inputs_for_decoder()
    input_mask = None
    self.model_tester.create_and_check_model_as_decoder(
    config ,input_ids ,input_mask ,token_labels ,encoder_hidden_states ,encoder_attention_mask ,)
def test_for_causal_lm( self ) -> Optional[Any]:
    '''simple docstring'''
    config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
    self.model_tester.create_and_check_for_causal_lm(*config_and_inputs )
@slow
def test_model_from_pretrained( self ) -> Tuple:
    '''simple docstring'''
    model = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
    self.assertIsNotNone(model )
@require_torch
class BertGenerationEncoderIntegrationTest( unittest.TestCase ):
    @slow
    def test_inference_no_head_absolute_embedding( self ) -> int:
        '''simple docstring'''
        model = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size([1, 8, 1024] )
        self.assertEqual(output.shape ,expected_shape )
        expected_slice = torch.tensor(
        [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] ,expected_slice ,atol=1e-4 ) )
@require_torch
class BertGenerationDecoderIntegrationTest( unittest.TestCase ):
    @slow
    def test_inference_no_head_absolute_embedding( self ) -> Optional[int]:
        '''simple docstring'''
        model = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size([1, 8, 50358] )
        self.assertEqual(output.shape ,expected_shape )
        expected_slice = torch.tensor(
        [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] ,expected_slice ,atol=1e-4 ) )
| 321
| 1
|
"""simple docstring"""
from math import factorial
def binomial_distribution( successes : int ,trials : int ,prob : float ):
    if successes > trials:
        raise ValueError('successes must be lower or equal to trials' )
    if trials < 0 or successes < 0:
        raise ValueError('the function is defined for non-negative integers' )
    if not isinstance(successes ,int ) or not isinstance(trials ,int ):
        raise ValueError('the function is defined for non-negative integers' )
    if not 0 < prob < 1:
        raise ValueError('prob has to be in range of 0 - 1' )
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials ) )
    coefficient /= factorial(successes ) * factorial(trials - successes )
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("Probability of 2 successes out of 4 trails")
print("with probability of 0.75 is:", end=" ")
print(binomial_distribution(2, 4, 0.75))
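# Worked check (added for illustration): P(X = 2) for n = 4 trials with p = 0.75 is
# C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375,
# which is exactly what binomial_distribution(2, 4, 0.75) prints above.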
| 321
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward( self ,input_ids ,token_type_ids ,attention_mask ):
        '''simple docstring'''
        return None
class FuncNonContiguousArgs:
    def forward( self ,input_ids ,some_other_args ,token_type_ids ,attention_mask ):
        '''simple docstring'''
        return None
class OnnxExportTestCase( unittest.TestCase ):
    MODEL_TO_TEST = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def test_export_tensorflow( self ) -> str:
    '''simple docstring'''
    for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
        self._test_export(model ,'tf' ,12 ,**model_kwargs )
@require_torch
@slow
def test_export_pytorch( self ) -> List[Any]:
    '''simple docstring'''
    for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
        self._test_export(model ,'pt' ,12 ,**model_kwargs )
@require_torch
@slow
def test_export_custom_bert_model( self ) -> Optional[int]:
    '''simple docstring'''
    from transformers import BertModel
    vocab = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
    with NamedTemporaryFile(mode='w+t' ) as vocab_file:
        vocab_file.write('\n'.join(vocab ) )
        vocab_file.flush()
        tokenizer = BertTokenizerFast(vocab_file.name )
        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab ) ) )
            model.save_pretrained(bert_save_dir )
            self._test_export(bert_save_dir ,'pt' ,12 ,tokenizer )
@require_tf
@slow
def test_quantize_tf( self ) -> Tuple:
    '''simple docstring'''
    for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
        path = self._test_export(model ,'tf' ,12 ,**model_kwargs )
        quantized_path = quantize(Path(path ) )
        # Ensure the actual quantized model is not bigger than the original one
        if quantized_path.stat().st_size >= Path(path ).stat().st_size:
            self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def test_quantize_pytorch( self ) -> List[str]:
    '''simple docstring'''
    for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
        path = self._test_export(model ,'pt' ,12 ,**model_kwargs )
        quantized_path = quantize(path )
        # Ensure the actual quantized model is not bigger than the original one
        if quantized_path.stat().st_size >= Path(path ).stat().st_size:
            self.fail('Quantized model is bigger than initial ONNX model' )
def _test_export( self ,model ,framework ,opset ,tokenizer=None ,**model_kwargs ) -> Optional[int]:
    '''simple docstring'''
    try:
        # Compute path
        with TemporaryDirectory() as tempdir:
            path = Path(tempdir ).joinpath('model.onnx' )
        # Remove folder if exists
        if path.parent.exists():
            path.parent.rmdir()
        # Export
        convert(framework ,model ,path ,opset ,tokenizer ,**model_kwargs )
        return path
    except Exception as e:
        self.fail(e )
@require_torch
@require_tokenizers
@slow
def test_infer_dynamic_axis_pytorch( self ) -> Any:
    '''simple docstring'''
    from transformers import BertModel
    model = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
    tokenizer = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
    self._test_infer_dynamic_axis(model ,tokenizer ,'pt' )
@require_tf
@require_tokenizers
@slow
def test_infer_dynamic_axis_tf( self ) -> str:
    '''simple docstring'''
    from transformers import TFBertModel
    model = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
    tokenizer = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
    self._test_infer_dynamic_axis(model ,tokenizer ,'tf' )
def _test_infer_dynamic_axis( self ,model ,tokenizer ,framework ) -> Dict:
    '''simple docstring'''
    nlp = FeatureExtractionPipeline(model ,tokenizer )
    variable_names = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
    input_vars , output_vars , shapes , tokens = infer_shapes(nlp ,framework )
    # Assert all variables are present
    self.assertEqual(len(shapes ) ,len(variable_names ) )
    self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
    self.assertSequenceEqual(variable_names[:3] ,input_vars )
    self.assertSequenceEqual(variable_names[3:] ,output_vars )
    # Assert inputs are {0: batch, 1: sequence}
    for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
        self.assertDictEqual(shapes[var_name] ,{0: 'batch', 1: 'sequence'} )
    # Assert outputs are {0: batch, 1: sequence} and {0: batch}
    self.assertDictEqual(shapes['output_0'] ,{0: 'batch', 1: 'sequence'} )
    self.assertDictEqual(shapes['output_1'] ,{0: 'batch'} )
def test_ensure_valid_input( self ) -> Any:
    '''simple docstring'''
    # All generated args are valid
    input_names = ['input_ids', 'attention_mask', 'token_type_ids']
    tokens = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
    ordered_input_names , inputs_args = ensure_valid_input(FuncContiguousArgs() ,tokens ,input_names )
    # Should have exactly the same number of args (all are valid)
    self.assertEqual(len(inputs_args ) ,3 )
    # Should have exactly the same input names
    self.assertEqual(set(ordered_input_names ) ,set(input_names ) )
    # Parameters should be reordered according to their respective place in the function:
    # (input_ids, token_type_ids, attention_mask)
    self.assertEqual(inputs_args ,(tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
    # Generated args are interleaved with other args (for instance parameter "past" in GPT2)
    ordered_input_names , inputs_args = ensure_valid_input(FuncNonContiguousArgs() ,tokens ,input_names )
    # Should have exactly one arg (all before the one not provided "some_other_args")
    self.assertEqual(len(inputs_args ) ,1 )
    self.assertEqual(len(ordered_input_names ) ,1 )
    # Should have only "input_ids"
    self.assertEqual(inputs_args[0] ,tokens['input_ids'] )
    self.assertEqual(ordered_input_names[0] ,'input_ids' )
def test_generate_identified_name( self ) -> Union[str, Any]:
    '''simple docstring'''
    generated = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) ,'-test' )
    self.assertEqual('/home/something/my_fake_model-test.onnx' ,generated.as_posix() )
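# Illustrative sketch (added; not part of the original tests). The export/quantize flow
# exercised above reduces to two calls; the positional form below mirrors how
# _test_export invokes convert (framework, model, output path, opset):
#
#     from pathlib import Path
#     from transformers.convert_graph_to_onnx import convert, quantize
#
#     convert("pt", "bert-base-cased", Path("onnx/bert.onnx"), 12)
#     quantized_path = quantize(Path("onnx/bert.onnx"))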
| 321
| 1
|
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch( tf_checkpoint_path : str ,config_path : str ,pytorch_dump_path : str ):
    def get_masked_lm_array( name : str ):
        full_name = F'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        array = tf.train.load_variable(tf_checkpoint_path ,full_name )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
    def get_encoder_array( name : str ):
        full_name = F'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        array = tf.train.load_variable(tf_checkpoint_path ,full_name )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
    def get_encoder_layer_array( layer_index : int ,name : str ):
        full_name = F'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        array = tf.train.load_variable(tf_checkpoint_path ,full_name )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
    def get_encoder_attention_layer_array( layer_index : int ,name : str ,original_shape ):
        full_name = F'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        array = tf.train.load_variable(tf_checkpoint_path ,full_name )
        array = array.reshape(original_shape )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
    print(F'''Loading model based on config from {config_path}...''' )
    config = BertConfig.from_json_file(config_path )
    model = BertForMaskedLM(config )
    # Layers
    for layer_index in range(0 ,config.num_hidden_layers ):
        layer: BertLayer = model.bert.encoder.layer[layer_index]
        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index ,'_query_dense/kernel' ,self_attn.query.weight.data.shape )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index ,'_query_dense/bias' ,self_attn.query.bias.data.shape )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index ,'_key_dense/kernel' ,self_attn.key.weight.data.shape )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index ,'_key_dense/bias' ,self_attn.key.bias.data.shape )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index ,'_value_dense/kernel' ,self_attn.value.weight.data.shape )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index ,'_value_dense/bias' ,self_attn.value.bias.data.shape )
        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index ,'_output_dense/kernel' ,self_output.dense.weight.data.shape )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index ,'_output_dense/bias' ,self_output.dense.bias.data.shape )
        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index ,'_attention_layer_norm/gamma' )
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index ,'_attention_layer_norm/beta' )
        # Intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.weight.data = get_encoder_layer_array(layer_index ,'_intermediate_dense/kernel' )
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index ,'_intermediate_dense/bias' )
        # Output
        bert_output: BertOutput = layer.output
        bert_output.dense.weight.data = get_encoder_layer_array(layer_index ,'_output_dense/kernel' )
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index ,'_output_dense/bias' )
        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index ,'_output_layer_norm/gamma' )
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index ,'_output_layer_norm/beta' )
    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array('_position_embedding_layer/embeddings' )
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array('_type_embedding_layer/embeddings' )
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array('_embedding_norm_layer/gamma' )
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array('_embedding_norm_layer/beta' )
    # LM Head
    lm_head = model.cls.predictions.transform
    lm_head.dense.weight.data = get_masked_lm_array('dense/kernel' )
    lm_head.dense.bias.data = get_masked_lm_array('dense/bias' )
    lm_head.LayerNorm.weight.data = get_masked_lm_array('layer_norm/gamma' )
    lm_head.LayerNorm.bias.data = get_masked_lm_array('layer_norm/beta' )
    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array('embedding_table' )
    # Pooling
    model.bert.pooler = BertPooler(config=config )
    model.bert.pooler.dense.weight.data = get_encoder_array('_pooler_layer/kernel' )
    model.bert.pooler.dense.bias.data = get_encoder_array('_pooler_layer/bias' )
    # Export final model
    model.save_pretrained(pytorch_dump_path )
    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path )
    print(new_model.eval() )
    print('Model conversion was done successfully!' )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
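# Example invocation of this script (added; all paths are placeholders):
# python convert_token_dropping_checkpoint.py \
#     --tf_checkpoint_path ./token_dropping_tf2_ckpt \
#     --bert_config_file ./bert_config.json \
#     --pytorch_dump_path ./converted_model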
| 321
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest( unittest.TestCase ):
def test_input_types( self ) -> Any:
    '''simple docstring'''
    # For consistency, dc.token_ids is a list of integer lists; tensors are rejected.
    token_ids = [[1, 2, 4], [1, 2, 3, 4]]
    dc = DisjunctiveConstraint(token_ids )
    self.assertTrue(isinstance(dc.token_ids ,list ) )
    with self.assertRaises(ValueError ):
        DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
    with self.assertRaises(ValueError ):
        DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def test_check_illegal_input( self ) -> Any:
    '''simple docstring'''
    # One candidate must not be a complete subset of another, since fulfilment
    # would then be ambiguous.
    token_ids = [[1, 2], [1, 2, 3, 4]]
    with self.assertRaises(ValueError ):
        DisjunctiveConstraint(token_ids )  # fails here
def test_example_progression( self ) -> str:
    '''simple docstring'''
    token_ids = [[1, 2, 3], [1, 2, 4]]
    dc = DisjunctiveConstraint(token_ids )
    stepped , completed , reset = dc.update(1 )
    desired = stepped is True and completed is False and reset is False
    self.assertTrue(desired )
    self.assertTrue(not dc.completed )
    self.assertTrue(dc.current_seq == [1] )
    stepped , completed , reset = dc.update(2 )
    desired = stepped is True and completed is False and reset is False
    self.assertTrue(desired )
    self.assertTrue(not dc.completed )
    self.assertTrue(dc.current_seq == [1, 2] )
    stepped , completed , reset = dc.update(3 )
    desired = stepped is True and completed is True and reset is False
    self.assertTrue(desired )
    self.assertTrue(dc.completed )  # Completed!
    self.assertTrue(dc.current_seq == [1, 2, 3] )
def test_example_progression_unequal_three_mid_and_reset( self ) -> Any:
    '''simple docstring'''
    token_ids = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
    dc = DisjunctiveConstraint(token_ids )
    stepped , completed , reset = dc.update(1 )
    self.assertTrue(not dc.completed )
    self.assertTrue(dc.current_seq == [1] )
    stepped , completed , reset = dc.update(2 )
    self.assertTrue(not dc.completed )
    self.assertTrue(dc.current_seq == [1, 2] )
    stepped , completed , reset = dc.update(4 )
    self.assertTrue(not dc.completed )
    self.assertTrue(dc.current_seq == [1, 2, 4] )
    stepped , completed , reset = dc.update(5 )
    self.assertTrue(dc.completed )  # Completed!
    self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
    dc.reset()
    stepped , completed , reset = dc.update(1 )
    self.assertTrue(not dc.completed )
    self.assertTrue(dc.remaining() == 3 )
    self.assertTrue(dc.current_seq == [1] )
    stepped , completed , reset = dc.update(2 )
    self.assertTrue(not dc.completed )
    self.assertTrue(dc.remaining() == 2 )
    self.assertTrue(dc.current_seq == [1, 2] )
    stepped , completed , reset = dc.update(5 )
    self.assertTrue(dc.completed )  # Completed!
    self.assertTrue(dc.remaining() == 0 )
    self.assertTrue(dc.current_seq == [1, 2, 5] )
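    # Added illustration (not part of the original test): a DisjunctiveConstraint over
    # [[1, 2, 3], [1, 2, 4]] behaves like a trie with the shared prefix [1, 2]; whichever
    # branch token (3 or 4) is generated first decides which candidate sequence fulfils
    # the constraint, as the progressions above verify step by step.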
| 321
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE ={
"configuration_xlm_roberta_xl": [
"XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaXLConfig",
"XLMRobertaXLOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE =[
"XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaXLForCausalLM",
"XLMRobertaXLForMaskedLM",
"XLMRobertaXLForMultipleChoice",
"XLMRobertaXLForQuestionAnswering",
"XLMRobertaXLForSequenceClassification",
"XLMRobertaXLForTokenClassification",
"XLMRobertaXLModel",
"XLMRobertaXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure)
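# Note (added): _LazyModule defers the torch-backed imports declared in
# _import_structure until an attribute of this module is first accessed, so merely
# importing the package stays cheap when the model classes are never used.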
| 321
|
"""simple docstring"""
import math
from collections.abc import Callable
def intersection( function : Callable[[float], float] ,x0 : float ,x1 : float ):
    x_n : float = x0
    x_n1 : float = x1
    while True:
        if x_n == x_n1 or function(x_n1 ) == function(x_n ):
            raise ZeroDivisionError('float division by zero, could not find root' )
        x_n2 : float = x_n1 - (
            function(x_n1 ) / ((function(x_n1 ) - function(x_n )) / (x_n1 - x_n ))
        )
        if abs(x_n2 - x_n1 ) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f( x : float ):
    return math.pow(x ,3 ) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
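# Sanity note (added): f(x) = x**3 - 2*x - 5 has its real root near x ≈ 2.0945515,
# so intersection(f, 3, 3.5) converges to that value. Each iteration applies the
# secant update x_{n+2} = x_{n+1} - f(x_{n+1}) * (x_{n+1} - x_n) / (f(x_{n+1}) - f(x_n)).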
| 321
|
"""simple docstring"""
from collections import namedtuple
import requests
from lxml import html # type: ignore
__SCREAMING_SNAKE_CASE =namedtuple("covid_data", "cases deaths recovered")
def covid_stats( url : str = "https://www.worldometers.info/coronavirus/" ):
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url ).content ).xpath(xpath_str ) )
__SCREAMING_SNAKE_CASE ="Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
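# Usage note (added): covid_stats() returns the covid_data namedtuple declared above,
# e.g. covid_data(cases='...', deaths='...', recovered='...'); the fields are strings
# scraped live from the page, so the numbers change between runs.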
| 321
| 1
|
"""simple docstring"""
def valid_coloring( neighbours : list[int] ,colored_vertices : list[int] ,color : int ):
    # Does any colored neighbour already use this color?
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )
def util_color( graph : list[list[int]] ,max_colors : int ,colored_vertices : list[int] ,index : int ):
    # Base Case
    if index == len(graph ):
        return True
    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] ,colored_vertices ,i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph ,max_colors ,colored_vertices ,index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color( graph : list[list[int]] ,max_colors : int ):
    colored_vertices = [-1] * len(graph )
    if util_color(graph ,max_colors ,colored_vertices ,0 ):
        return colored_vertices
    return []
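# Illustrative usage (added; not part of the original module): a triangle plus a
# pendant vertex, given as an adjacency matrix, is 3-colorable.
if __name__ == "__main__":
    example_graph = [
        [0, 1, 1, 0],
        [1, 0, 1, 0],
        [1, 1, 0, 1],
        [0, 0, 1, 0],
    ]
    print(color(example_graph, 3))  # e.g. [0, 1, 2, 0]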
| 321
|
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 321
| 1
|
"""simple docstring"""
import argparse
from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def convert_tax_checkpoint_to_flax( tax_checkpoint_path ,config_name ,flax_dump_folder_path ):
    config = AutoConfig.from_pretrained(config_name )
    flax_model = FlaxAutoModelForSeqaSeqLM.from_config(config=config )
    tax_model = checkpoints.load_t5x_checkpoint(tax_checkpoint_path )
    split_mlp_wi = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp']
if config.model_type == "t5":
lowercase_ : List[Any] = 'SelfAttention'
if config.model_type == "longt5" and config.encoder_attention_type == "local":
lowercase_ : Optional[Any] = 'LocalSelfAttention'
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase_ : Any = 'TransientGlobalSelfAttention'
else:
raise ValueError(
'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'
' attribute with a value from [\'local\', \'transient-global].' )
# Encoder
for layer_index in range(config.num_layers ):
layer_name = F'''layers_{str(layer_index )}'''
# Self-Attention
tax_attention_key = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel']
tax_attention_out = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel']
tax_attention_query = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel']
tax_attention_value = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
    tax_global_layer_norm = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale']
# Layer Normalization
tax_attention_layer_norm = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale']
if split_mlp_wi:
    tax_mlp_wi_a = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel']
    tax_mlp_wi_b = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel']
else:
    tax_mlp_wi = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel']
tax_mlp_wo = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
tax_mlp_layer_norm = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
flax_model_encoder_layer_block = flax_model.params['encoder']['block'][str(layer_index )]['layer']
lowercase_ : Any = tax_attention_key
lowercase_ : str = tax_attention_out
lowercase_ : Tuple = tax_attention_query
lowercase_ : str = tax_attention_value
lowercase_ : Optional[int] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase_ : List[Any] = tax_global_layer_norm
if split_mlp_wi:
lowercase_ : Any = tax_mlp_wi_a
lowercase_ : int = tax_mlp_wi_b
else:
lowercase_ : Dict = tax_mlp_wi
lowercase_ : Dict = tax_mlp_wo
lowercase_ : int = tax_mlp_layer_norm
lowercase_ : List[Any] = flax_model_encoder_layer_block
# Only for layer 0:
tax_encoder_rel_embedding = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T
lowercase_ : List[str] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
tax_encoder_global_rel_embedding = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T
lowercase_ : Optional[int] = tax_encoder_global_rel_embedding
# Assigning
tax_encoder_norm = tax_model['target']['encoder']['encoder_norm']['scale']
lowercase_ : Union[str, Any] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
layer_name = F'''layers_{str(layer_index )}'''
# Self-Attention
tax_attention_key = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel']
tax_attention_out = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel']
tax_attention_query = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel']
tax_attention_value = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel']
# Layer Normalization
tax_pre_attention_layer_norm = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][
    'scale'
]
# Encoder-Decoder-Attention
tax_enc_dec_attention_module = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention']
tax_enc_dec_attention_key = tax_enc_dec_attention_module['key']['kernel']
tax_enc_dec_attention_out = tax_enc_dec_attention_module['out']['kernel']
tax_enc_dec_attention_query = tax_enc_dec_attention_module['query']['kernel']
tax_enc_dec_attention_value = tax_enc_dec_attention_module['value']['kernel']
# Layer Normalization
tax_cross_layer_norm = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale']
# MLP
if split_mlp_wi:
    tax_mlp_wi_a = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel']
    tax_mlp_wi_b = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel']
else:
    tax_mlp_wi = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel']
tax_mlp_wo = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
tax_mlp_layer_norm = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
flax_model_decoder_layer_block = flax_model.params['decoder']['block'][str(layer_index )]['layer']
lowercase_ : Dict = tax_attention_key
lowercase_ : Optional[int] = tax_attention_out
lowercase_ : Union[str, Any] = tax_attention_query
lowercase_ : Tuple = tax_attention_value
lowercase_ : Optional[Any] = tax_pre_attention_layer_norm
lowercase_ : Dict = tax_enc_dec_attention_key
lowercase_ : List[str] = tax_enc_dec_attention_out
lowercase_ : List[str] = tax_enc_dec_attention_query
lowercase_ : Optional[Any] = tax_enc_dec_attention_value
lowercase_ : Tuple = tax_cross_layer_norm
if split_mlp_wi:
lowercase_ : List[Any] = tax_mlp_wi_a
lowercase_ : str = tax_mlp_wi_b
else:
lowercase_ : List[Any] = tax_mlp_wi
lowercase_ : Dict = tax_mlp_wo
lowercase_ : Optional[int] = tax_mlp_layer_norm
lowercase_ : Tuple = flax_model_decoder_layer_block
# Decoder Normalization
txa_decoder_norm = tax_model['target']['decoder']['decoder_norm']['scale']
lowercase_ : Union[str, Any] = txa_decoder_norm
# Only for layer 0:
tax_decoder_rel_embedding = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T
lowercase_ : Dict = tax_decoder_rel_embedding
# Token Embeddings
txa_token_embeddings = tax_model['target']['token_embedder']['embedding']
lowercase_ : str = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
lowercase_ : List[str] = tax_model['target']['decoder']['logits_dense']['kernel']
flax_model.save_pretrained(flax_dump_folder_path )
print('T5X Model was successfully converted!' )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
)
parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
parser.add_argument(
"--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
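# A minimal invocation sketch for the script above (paths and the config name are
# illustrative, not taken from the source):
#
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_name google/t5-v1_1-small \
#       --flax_dump_folder_path ./flax_dump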
| 321
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=33 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=512 ,__UpperCamelCase=16 ,__UpperCamelCase=2 ,__UpperCamelCase=0.02 ,__UpperCamelCase=3 ,__UpperCamelCase=4 ,__UpperCamelCase=None ,) -> List[Any]:
'''simple docstring'''
lowercase_ : Any = parent
lowercase_ : str = batch_size
lowercase_ : List[Any] = seq_length
lowercase_ : Dict = is_training
lowercase_ : Tuple = use_input_mask
lowercase_ : Optional[Any] = use_token_type_ids
lowercase_ : List[str] = use_labels
lowercase_ : Any = vocab_size
lowercase_ : List[str] = hidden_size
lowercase_ : Optional[int] = num_hidden_layers
lowercase_ : int = num_attention_heads
lowercase_ : int = intermediate_size
lowercase_ : List[Any] = hidden_act
lowercase_ : Optional[int] = hidden_dropout_prob
lowercase_ : Tuple = attention_probs_dropout_prob
lowercase_ : Tuple = max_position_embeddings
lowercase_ : Optional[int] = type_vocab_size
lowercase_ : Optional[int] = type_sequence_label_size
lowercase_ : Dict = initializer_range
lowercase_ : int = num_labels
lowercase_ : Any = num_choices
lowercase_ : int = scope
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowercase_ : Dict = None
if self.use_input_mask:
lowercase_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ : Tuple = None
lowercase_ : Tuple = None
lowercase_ : Tuple = None
if self.use_labels:
lowercase_ : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
lowercase_ : int = ids_tensor([self.batch_size] ,self.num_choices )
lowercase_ : str = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
lowercase_ : List[Any] = EsmModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : Tuple = model(__UpperCamelCase ,attention_mask=__UpperCamelCase )
lowercase_ : Union[str, Any] = model(__UpperCamelCase )
lowercase_ : int = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Dict = EsmForMaskedLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : int = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : str = self.num_labels
lowercase_ : int = EsmForTokenClassification(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : Any = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ : Dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
lowercase = False
lowercase = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase = ()
lowercase = (
{
'feature-extraction': EsmModel,
'fill-mask': EsmForMaskedLM,
'text-classification': EsmForSequenceClassification,
'token-classification': EsmForTokenClassification,
'zero-shot': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = True
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Dict = EsmModelTester(self )
lowercase_ : List[Any] = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase_ : Optional[Any] = type
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase )
@slow
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : List[str] = EsmModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
lowercase_ : str = EsmEmbeddings(config=__UpperCamelCase )
lowercase_ : Tuple = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
lowercase_ : List[Any] = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowercase_ : Tuple = create_position_ids_from_input_ids(__UpperCamelCase ,model.padding_idx )
self.assertEqual(position_ids.shape ,expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()[0]
lowercase_ : List[Any] = EsmEmbeddings(config=__UpperCamelCase )
lowercase_ : List[Any] = torch.empty(2 ,4 ,30 )
lowercase_ : List[str] = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowercase_ : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowercase_ : List[str] = embeddings.create_position_ids_from_inputs_embeds(__UpperCamelCase )
self.assertEqual(position_ids.shape ,expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) )
@unittest.skip('Esm does not support embedding resizing' )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip('Esm does not support embedding resizing' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@require_torch
class UpperCamelCase ( lowercase_ ):
@slow
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
with torch.no_grad():
lowercase_ : Any = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowercase_ : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowercase_ : List[str] = model(__UpperCamelCase )[0]
lowercase_ : Optional[int] = 33
lowercase_ : Union[str, Any] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape ,__UpperCamelCase )
lowercase_ : List[str] = torch.tensor(
[[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
with torch.no_grad():
lowercase_ : int = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowercase_ : Tuple = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowercase_ : Dict = model(__UpperCamelCase )[0]
# compare the actual values for a slice.
lowercase_ : Any = torch.tensor(
[[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
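# A minimal, self-contained sketch of the position-id rule the embedding tests above
# verify: padding positions keep padding_idx, and real tokens count up from
# padding_idx + 1 (the helper is the real one imported at the top of this file):
#
#   import torch
#   from transformers.models.esm.modeling_esm import create_position_ids_from_input_ids
#   ids = torch.as_tensor([[12, 31, 13, 1]])    # last token is the pad token (padding_idx = 1)
#   create_position_ids_from_input_ids(ids, 1)  # -> tensor([[2, 3, 4, 1]])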
| 321
| 1
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
# TODO: upload to AWS
__SCREAMING_SNAKE_CASE ={
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
),
}
class UpperCamelCase ( lowercase_ ):
lowercase = 'retribert'
def __init__( self ,__UpperCamelCase=3_0522 ,__UpperCamelCase=768 ,__UpperCamelCase=8 ,__UpperCamelCase=12 ,__UpperCamelCase=3072 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=512 ,__UpperCamelCase=2 ,__UpperCamelCase=0.02 ,__UpperCamelCase=1e-12 ,__UpperCamelCase=True ,__UpperCamelCase=128 ,__UpperCamelCase=0 ,**__UpperCamelCase ,) -> Dict:
'''simple docstring'''
super().__init__(pad_token_id=__UpperCamelCase ,**__UpperCamelCase )
lowercase_ : Optional[int] = vocab_size
lowercase_ : Optional[int] = hidden_size
lowercase_ : Any = num_hidden_layers
lowercase_ : int = num_attention_heads
lowercase_ : Optional[Any] = hidden_act
lowercase_ : str = intermediate_size
lowercase_ : Any = hidden_dropout_prob
lowercase_ : Optional[int] = attention_probs_dropout_prob
lowercase_ : Union[str, Any] = max_position_embeddings
lowercase_ : Union[str, Any] = type_vocab_size
lowercase_ : Optional[Any] = initializer_range
lowercase_ : Optional[Any] = layer_norm_eps
lowercase_ : Any = share_encoders
lowercase_ : int = projection_dim
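# A minimal usage sketch against the un-obfuscated original of the class above
# (RetriBertConfig); keyword names are assumptions mapping onto the positional
# parameters of __init__:
#
#   config = RetriBertConfig(vocab_size=30522, hidden_size=768, projection_dim=128)
#   config.model_type  # -> 'retribert'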
| 321
|
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=0.2 ,__UpperCamelCase=0.2 ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Optional[int] = bp_numa
lowercase_ : Dict = bp_numa
lowercase_ : Tuple = bp_numa
lowercase_ : List[Any] = conva_get[:2]
lowercase_ : int = conva_get[2]
lowercase_ : Dict = size_pa
lowercase_ : int = rate_w
lowercase_ : Union[str, Any] = rate_t
lowercase_ : Dict = [
np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
lowercase_ : str = -2 * np.random.rand(self.conva[1] ) + 1
lowercase_ : Tuple = -2 * np.random.rand(self.num_bpa ) + 1
lowercase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ : int = {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
with open(__UpperCamelCase ,'wb' ) as f:
pickle.dump(__UpperCamelCase ,__UpperCamelCase )
print(f'''Model saved: {save_path}''' )
@classmethod
def _UpperCAmelCase ( cls ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
with open(__UpperCamelCase ,'rb' ) as f:
lowercase_ : Any = pickle.load(__UpperCamelCase ) # noqa: S301
lowercase_ : str = model_dic.get('conv1' )
conv_get.append(model_dic.get('step_conv1' ) )
lowercase_ : Union[str, Any] = model_dic.get('size_pooling1' )
lowercase_ : Optional[Any] = model_dic.get('num_bp1' )
lowercase_ : str = model_dic.get('num_bp2' )
lowercase_ : Optional[Any] = model_dic.get('num_bp3' )
lowercase_ : Union[str, Any] = model_dic.get('rate_weight' )
lowercase_ : Optional[int] = model_dic.get('rate_thre' )
# create model instance
lowercase_ : Any = cls(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# modify model parameter
lowercase_ : Optional[Any] = model_dic.get('w_conv1' )
lowercase_ : Tuple = model_dic.get('wkj' )
lowercase_ : Union[str, Any] = model_dic.get('vji' )
lowercase_ : Optional[Any] = model_dic.get('thre_conv1' )
lowercase_ : Dict = model_dic.get('thre_bp2' )
lowercase_ : Optional[int] = model_dic.get('thre_bp3' )
return conv_ins
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any:
'''simple docstring'''
return 1 / (1 + np.exp(-1 * x ))
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
return round(__UpperCamelCase ,3 )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
lowercase_ : Dict = convs[0]
lowercase_ : Any = convs[1]
lowercase_ : Optional[Any] = np.shape(__UpperCamelCase )[0]
# get the data slice of original image data, data_focus
lowercase_ : Tuple = []
for i_focus in range(0 ,size_data - size_conv + 1 ,__UpperCamelCase ):
for j_focus in range(0 ,size_data - size_conv + 1 ,__UpperCamelCase ):
lowercase_ : List[Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__UpperCamelCase )
# calculate the feature map of every kernel, saved as a list of matrices
lowercase_ : Dict = []
lowercase_ : Dict = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(__UpperCamelCase ):
lowercase_ : Tuple = []
for i_focus in range(len(__UpperCamelCase ) ):
lowercase_ : Optional[int] = (
np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(__UpperCamelCase ) )
lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase ).reshape(
__UpperCamelCase ,__UpperCamelCase )
data_featuremap.append(__UpperCamelCase )
# expand the data slice to one dimension
lowercase_ : Optional[int] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__UpperCamelCase ) )
lowercase_ : str = np.asarray(__UpperCamelCase )
return focus_list, data_featuremap
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase="average_pool" ) -> Tuple:
'''simple docstring'''
lowercase_ : Union[str, Any] = len(featuremaps[0] )
lowercase_ : str = int(size_map / size_pooling )
lowercase_ : Optional[int] = []
for i_map in range(len(__UpperCamelCase ) ):
lowercase_ : int = featuremaps[i_map]
lowercase_ : List[str] = []
for i_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
for j_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : List[str] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__UpperCamelCase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__UpperCamelCase ) )
lowercase_ : Dict = np.asmatrix(__UpperCamelCase ).reshape(__UpperCamelCase ,__UpperCamelCase )
featuremap_pooled.append(__UpperCamelCase )
return featuremap_pooled
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any:
'''simple docstring'''
lowercase_ : Tuple = []
for i in range(len(__UpperCamelCase ) ):
lowercase_ : Optional[Any] = np.shape(data[i] )
lowercase_ : List[str] = data[i].reshape(1 ,shapes[0] * shapes[1] )
lowercase_ : List[str] = data_listed.getA().tolist()[0]
data_expanded.extend(__UpperCamelCase )
lowercase_ : int = np.asarray(__UpperCamelCase )
return data_expanded
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
lowercase_ : Any = np.asarray(__UpperCamelCase )
lowercase_ : Any = np.shape(__UpperCamelCase )
lowercase_ : Optional[Any] = data_mat.reshape(1 ,shapes[0] * shapes[1] )
return data_expanded
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str:
'''simple docstring'''
lowercase_ : Any = []
lowercase_ : List[Any] = 0
for i_map in range(__UpperCamelCase ):
lowercase_ : List[str] = np.ones((size_map, size_map) )
for i in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
for j in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : List[Any] = pd_pool[
i_pool
]
lowercase_ : Any = i_pool + 1
lowercase_ : Optional[int] = np.multiply(
__UpperCamelCase ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) )
pd_all.append(__UpperCamelCase )
return pd_all
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=bool ) -> Optional[int]:
'''simple docstring'''
print('----------------------Start Training-------------------------' )
print((' - - Shape: Train_Data ', np.shape(__UpperCamelCase )) )
print((' - - Shape: Teach_Data ', np.shape(__UpperCamelCase )) )
lowercase_ : int = 0
lowercase_ : Tuple = []
lowercase_ : Tuple = 1_0000
while rp < n_repeat and mse >= error_accuracy:
lowercase_ : List[str] = 0
print(f'''-------------Learning Time {rp}--------------''' )
for p in range(len(__UpperCamelCase ) ):
# print('------------Learning Image: %d--------------'%p)
lowercase_ : int = np.asmatrix(datas_train[p] )
lowercase_ : Any = np.asarray(datas_teach[p] )
lowercase_ , lowercase_ : Tuple = self.convolute(
__UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
lowercase_ : Any = self.pooling(__UpperCamelCase ,self.size_poolinga )
lowercase_ : Optional[int] = np.shape(__UpperCamelCase )
lowercase_ : Optional[int] = self._expand(__UpperCamelCase )
lowercase_ : int = data_bp_input
lowercase_ : Tuple = np.dot(__UpperCamelCase ,self.vji.T ) - self.thre_bpa
lowercase_ : Dict = self.sig(__UpperCamelCase )
lowercase_ : int = np.dot(__UpperCamelCase ,self.wkj.T ) - self.thre_bpa
lowercase_ : int = self.sig(__UpperCamelCase )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
lowercase_ : str = np.multiply(
(data_teach - bp_outa) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) )
lowercase_ : Optional[int] = np.multiply(
np.dot(__UpperCamelCase ,self.wkj ) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) )
lowercase_ : Any = np.dot(__UpperCamelCase ,self.vji )
lowercase_ : str = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowercase_ : Dict = pd_conva_pooled.T.getA().tolist()
lowercase_ : List[Any] = self._calculate_gradient_from_pool(
__UpperCamelCase ,__UpperCamelCase ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,)
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
lowercase_ : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] )
lowercase_ : Dict = self.rate_weight * np.dot(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : List[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
lowercase_ : Dict = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
lowercase_ : Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowercase_ : Any = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowercase_ : str = self.thre_bpa - pd_k_all * self.rate_thre
lowercase_ : Any = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
lowercase_ : List[Any] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowercase_ : int = rp + 1
lowercase_ : Union[str, Any] = error_count / patterns
all_mse.append(__UpperCamelCase )
def draw_error():
lowercase_ : str = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(__UpperCamelCase ,'+-' )
plt.plot(__UpperCamelCase ,'r--' )
plt.xlabel('Learning Times' )
plt.ylabel('All_mse' )
plt.grid(__UpperCamelCase ,alpha=0.5 )
plt.show()
print('------------------Training Completed---------------------' )
print((' - - Training epoch: ', rp, f''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Union[str, Any] = []
print('-------------------Start Testing-------------------------' )
print((' - - Shape: Test_Data ', np.shape(__UpperCamelCase )) )
for p in range(len(__UpperCamelCase ) ):
lowercase_ : List[Any] = np.asmatrix(datas_test[p] )
lowercase_ , lowercase_ : Optional[Any] = self.convolute(
__UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
lowercase_ : List[Any] = self.pooling(__UpperCamelCase ,self.size_poolinga )
lowercase_ : List[str] = self._expand(__UpperCamelCase )
lowercase_ : Any = data_bp_input
lowercase_ : Optional[Any] = bp_outa * self.vji.T - self.thre_bpa
lowercase_ : str = self.sig(__UpperCamelCase )
lowercase_ : List[str] = bp_outa * self.wkj.T - self.thre_bpa
lowercase_ : Optional[int] = self.sig(__UpperCamelCase )
produce_out.extend(bp_outa.getA().tolist() )
lowercase_ : List[str] = [list(map(self.do_round ,__UpperCamelCase ) ) for each in produce_out]
return np.asarray(__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase )
lowercase_ , lowercase_ : Union[str, Any] = self.convolute(
__UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
lowercase_ : Optional[int] = self.pooling(__UpperCamelCase ,self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
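# A minimal usage sketch against the un-obfuscated original class (constructor order
# per __init__ above: conv spec [kernel_size, n_kernels, step], pooling size, and the
# three BP-layer widths; method names are assumptions from the save/load logic above):
#
#   cnn = CNN([3, 2, 1], 2, 100, 20, 10)
#   cnn.save_model('model.pkl')       # pickles weights and hyperparameters
#   cnn = CNN.ReadModel('model.pkl')  # classmethod that rebuilds the instance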
| 321
| 1
|
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def lowercase__( ):
lowercase_ : List[Any] = ArgumentParser(
description=(
'PyTorch TPU distributed training launch helper utility that will spawn multiple distributed processes'
) )
# Optional arguments for the launch helper
parser.add_argument('--num_cores' , type=__SCREAMING_SNAKE_CASE , default=1 , help='Number of TPU cores to use (1 or 8).' )
# positional
parser.add_argument(
'training_script' , type=__SCREAMING_SNAKE_CASE , help=(
'The full path to the single TPU training '
'program/script to be launched in parallel, '
'followed by all the arguments for the '
'training script'
) , )
# rest from the training program
parser.add_argument('training_script_args' , nargs=__SCREAMING_SNAKE_CASE )
return parser.parse_args()
def lowercase__( ):
lowercase_ : str = parse_args()
# Import training_script as a module.
lowercase_ : Union[str, Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
lowercase_ : List[str] = script_fpath.stem
lowercase_ : Dict = importlib.import_module(__SCREAMING_SNAKE_CASE )
# Patch sys.argv
lowercase_ : Tuple = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
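# A minimal invocation sketch for the launcher above (the training script and its
# flags are illustrative):
#
#   python xla_spawn.py --num_cores 8 \
#       run_glue.py --model_name_or_path bert-base-cased --do_train ...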
| 321
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ):
lowercase_ : Dict = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(__UpperCamelCase )
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : int = 'sshleifer/tiny-gpt2'
lowercase_ : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[str] = 'sgugger/tiny-distilbert-classification'
lowercase_ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,only_pretrain_model=__UpperCamelCase ,)
lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Any = 'sshleifer/tiny-gpt2'
lowercase_ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : Optional[Any] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Dict = 'sshleifer/tiny-gpt2'
lowercase_ : Tuple = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] )
lowercase_ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Any = 'sshleifer/tiny-gpt2'
lowercase_ : Any = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ,[config] )
lowercase_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : int = 'sshleifer/tiny-gpt2'
lowercase_ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : List[str] = 'sshleifer/tiny-gpt2'
lowercase_ : Optional[int] = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] )
lowercase_ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : str = 'patrickvonplaten/t5-tiny-random'
lowercase_ : int = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ,configs=[config] )
lowercase_ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 ,'Cannot do xla on CPU.' )
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Optional[int] = 'sshleifer/tiny-gpt2'
lowercase_ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : List[str] = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=__UpperCamelCase ,save_to_csv=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(__UpperCamelCase ,'inf_time.csv' ) ,inference_memory_csv_file=os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ,env_info_csv_file=os.path.join(__UpperCamelCase ,'env.csv' ) ,multi_process=__UpperCamelCase ,)
lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'env.csv' ) ).exists() )
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : int = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(__UpperCamelCase ):
self.assertTrue(hasattr(__UpperCamelCase ,'sequential' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'cumulative' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'current' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(__UpperCamelCase ,'log.txt' ) ,log_print=__UpperCamelCase ,trace_memory_line_by_line=__UpperCamelCase ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : Dict = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Any = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'log.txt' ) ).exists() )
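# A minimal standalone sketch of the benchmark API exercised by the tests above
# (model id and sizes are illustrative):
#
#   from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
#   args = TensorFlowBenchmarkArguments(
#       models=['sshleifer/tiny-gpt2'], inference=True, training=False,
#       sequence_lengths=[8], batch_sizes=[1], multi_process=False,
#   )
#   results = TensorFlowBenchmark(args).run()
#   results.time_inference_result, results.memory_inference_result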
| 321
| 1
|
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowercase__( __SCREAMING_SNAKE_CASE : int ):
lowercase_ : Dict = filter(lambda __SCREAMING_SNAKE_CASE : p.requires_grad , model.parameters() )
lowercase_ : Optional[int] = sum([np.prod(p.size() ) for p in model_parameters] )
return params
__SCREAMING_SNAKE_CASE =logging.getLogger(__name__)
def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple ):
if metric == "rouge2":
lowercase_ : Union[str, Any] = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
lowercase_ : List[str] = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
lowercase_ : Dict = '{val_avg_em:.4f}-{step_count}'
else:
raise NotImplementedError(
F'''seq2seq callbacks only support rouge2, bleu and em, got {metric}. You can make your own by adding to this'''
' function.' )
lowercase_ : Dict = ModelCheckpoint(
dirpath=__SCREAMING_SNAKE_CASE , filename=__SCREAMING_SNAKE_CASE , monitor=F'''val_{metric}''' , mode='max' , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
def lowercase__( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] ):
return EarlyStopping(
monitor=F'''val_{metric}''' , mode='min' if 'loss' in metric else 'max' , patience=__SCREAMING_SNAKE_CASE , verbose=__SCREAMING_SNAKE_CASE , )
class UpperCamelCase ( pl.Callback ):
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Any:
'''simple docstring'''
lowercase_ : Dict = {f'''lr_group_{i}''': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(__UpperCamelCase )
@rank_zero_only
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=True ) -> None:
'''simple docstring'''
logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
lowercase_ : str = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
lowercase_ : Optional[int] = Path(pl_module.hparams.output_dir )
if type_path == "test":
lowercase_ : List[Any] = od / 'test_results.txt'
lowercase_ : int = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
lowercase_ : int = od / f'''{type_path}_results/{trainer.global_step:05d}.txt'''
lowercase_ : Any = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=__UpperCamelCase )
generations_file.parent.mkdir(exist_ok=__UpperCamelCase )
with open(__UpperCamelCase ,'a+' ) as writer:
for key in sorted(__UpperCamelCase ):
if key in ["log", "progress_bar", "preds"]:
continue
lowercase_ : Tuple = metrics[key]
if isinstance(__UpperCamelCase ,torch.Tensor ):
lowercase_ : Union[str, Any] = val.item()
lowercase_ : Tuple = f'''{key}: {val:.6f}\n'''
writer.write(__UpperCamelCase )
if not save_generations:
return
if "preds" in metrics:
lowercase_ : Any = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(__UpperCamelCase )
@rank_zero_only
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
try:
lowercase_ : Any = pl_module.model.model.num_parameters()
except AttributeError:
lowercase_ : str = pl_module.model.num_parameters()
lowercase_ : List[Any] = count_trainable_parameters(__UpperCamelCase )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1e6, 'grad_mp': n_trainable_pars / 1e6} )
@rank_zero_only
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> int:
'''simple docstring'''
save_json(pl_module.metrics ,pl_module.metrics_save_path )
return self._write_logs(__UpperCamelCase ,__UpperCamelCase ,'test' )
@rank_zero_only
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
save_json(pl_module.metrics ,pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
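# A minimal wiring sketch for the helpers above, using their un-obfuscated names
# (get_checkpoint_callback, get_early_stopping_callback, Seq2SeqLoggingCallback are
# assumptions from the original module; values are illustrative):
#
#   trainer = pl.Trainer(
#       callbacks=[
#           Seq2SeqLoggingCallback(),
#           get_checkpoint_callback('output_dir', 'rouge2'),
#           get_early_stopping_callback('rouge2', patience=3),
#       ],
#   )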
| 321
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
class UpperCamelCase ( lowercase_ ):
lowercase = ['input_values', 'padding_mask']
def __init__( self ,__UpperCamelCase = 1 ,__UpperCamelCase = 2_4000 ,__UpperCamelCase = 0.0 ,__UpperCamelCase = None ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> Any:
'''simple docstring'''
super().__init__(feature_size=__UpperCamelCase ,sampling_rate=__UpperCamelCase ,padding_value=__UpperCamelCase ,**__UpperCamelCase )
lowercase_ : List[str] = chunk_length_s
lowercase_ : Tuple = overlap
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = False ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if padding and truncation:
raise ValueError('Both padding and truncation were set. Make sure you only set one.' )
elif padding is None:
# by default let's pad the inputs
lowercase_ : Optional[int] = True
lowercase_ : Optional[int] = bool(
isinstance(__UpperCamelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) )
if is_batched:
lowercase_ : int = [np.asarray(__UpperCamelCase ,dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(__UpperCamelCase ,np.ndarray ):
lowercase_ : Any = np.asarray(__UpperCamelCase ,dtype=np.floataa )
elif isinstance(__UpperCamelCase ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
lowercase_ : List[str] = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
lowercase_ : Dict = [np.asarray(__UpperCamelCase ).T]
# verify inputs are valid
for idx, example in enumerate(__UpperCamelCase ):
if example.ndim > 2:
raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' )
lowercase_ : Optional[int] = None
lowercase_ : List[Any] = BatchFeature({'input_values': raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
lowercase_ : List[Any] = min(array.shape[0] for array in raw_audio )
lowercase_ : int = int(np.floor(max_length / self.chunk_stride ) )
lowercase_ : Dict = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
lowercase_ : List[Any] = max(array.shape[0] for array in raw_audio )
lowercase_ : Tuple = int(np.ceil(max_length / self.chunk_stride ) )
lowercase_ : List[str] = (nb_step - 1) * self.chunk_stride + self.chunk_length
lowercase_ : Union[str, Any] = 'max_length'
else:
lowercase_ : int = input_values
# normal padding on batch
if padded_inputs is None:
lowercase_ : int = self.pad(
__UpperCamelCase ,max_length=__UpperCamelCase ,truncation=__UpperCamelCase ,padding=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,)
if padding:
lowercase_ : Optional[int] = padded_inputs.pop('attention_mask' )
lowercase_ : Dict = []
for example in padded_inputs.pop('input_values' ):
if self.feature_size == 1:
lowercase_ : Optional[int] = example[..., None]
input_values.append(example.T )
lowercase_ : str = input_values
if return_tensors is not None:
lowercase_ : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase )
return padded_inputs
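# A minimal usage sketch against the un-obfuscated original of the class above
# (EncodecFeatureExtractor); the audio array is synthetic:
#
#   import numpy as np
#   fe = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000)
#   audio = np.random.randn(24000).astype(np.float32)  # 1 s of mono audio
#   inputs = fe(audio, sampling_rate=24000, return_tensors='np')
#   inputs['input_values'].shape                       # -> (1, 1, 24000)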
| 321
| 1
|