from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    """Configuration class to store the configuration of a RoBERTa model."""

    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
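
# Usage sketch: a minimal example, assuming the installed `transformers`
# package exposes this class; the six-layer override is purely illustrative.
#
#     from transformers import RobertaConfig
#
#     config = RobertaConfig(num_hidden_layers=6)
#     assert config.model_type == "roberta"
#     assert config.hidden_size == 768  # unchanged default
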
import unittest

import numpy as np
import timeout_decorator  # noqa

from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor


if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp

    from transformers import BlenderbotTokenizer
    from transformers.models.blenderbot.modeling_flax_blenderbot import (
        FlaxBlenderbotForConditionalGeneration,
        FlaxBlenderbotModel,
        shift_tokens_right,
    )


def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }


class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())


@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_utterances.sequences, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
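
# Behavior sketch for shift_tokens_right, as exercised by
# test_shift_tokens_right above (a minimal example; the row is illustrative):
# tokens shift one position to the right, the decoder start token (2) fills
# position 0, and one pad token (id 1) is consumed per row.
#
#     input_ids = np.array([[71, 82, 18, 33, 2, 1, 1]])
#     shift_tokens_right(input_ids, 1, 2)  # -> [[2, 71, 82, 18, 33, 2, 1]]
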
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    benchmark.run()


if __name__ == "__main__":
    main()
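
# Usage sketch: the same benchmark run programmatically rather than via the
# CLI. The model name, batch size, and sequence length are illustrative
# assumptions, not values taken from this script.
#
#     from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
#
#     args = TensorFlowBenchmarkArguments(
#         models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
#     )
#     TensorFlowBenchmark(args=args).run()
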
import math


def is_prime(number: int) -> bool:
    """Return True if `number` is prime, using trial division by odd factors."""
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Return the nearest prime to factor * value, searching upward by default
    (or downward when desc=True is passed)."""
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    # If the starting value was already prime, keep searching for the next one.
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
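
# Usage sketch for the helpers above:
#
#     assert is_prime(13) and not is_prime(21)
#     assert next_prime(13) == 17  # 13 is already prime, so the *next* prime is returned
#     assert next_prime(14) == 17  # otherwise counts upward to the nearest prime
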
"""Protein data type."""
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple

import numpy as np

from . import residue_constants


FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01


@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    # Cartesian coordinates of atoms in angstroms.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None


def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            # Map unknown residues to 'X' (done on a list, since strings are immutable).
            seq = ["X" if res_symbol not in residue_constants.restypes else res_symbol for res_symbol in seq]
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )


def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers


def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    """Add pdb headers to an existing PDB string. Useful during multi-chain recycling."""
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)


def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string."""
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)


def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Computes an ideal atom mask from the standard mask for each residue type."""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]


def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    """Assembles a protein from a prediction."""
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
from unittest import TestCase

from datasets import Dataset

from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
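
# Shape sketch (illustrative): each cluster entry is a dict carrying at least
# the fields the assertions above check, e.g.
#
#     ds_dedup, clusters = deduplicate_dataset(get_dataset())
#     # clusters[0][0] -> {..., "copies": 2, "is_extreme": True}
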
import argparse
import collections
import json
import os
import re
import string
import sys

import numpy as np


ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
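
# Worked example for compute_f1 above: gold "the cat sat" vs. prediction
# "cat sat down". After normalization ("the" is an article and is dropped),
# the token overlap is {"cat", "sat"}: precision = 2/3, recall = 2/2 = 1.0,
# so F1 = 2 * (2/3) * 1.0 / (2/3 + 1.0) = 0.8.
#
#     assert abs(compute_f1("the cat sat", "cat sat down") - 0.8) < 1e-9
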
import inspect
import unittest

from transformers import MobileNetV1Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileNetV1ForImageClassification, MobileNetV1Model
    from transformers.models.mobilenet_v1.modeling_mobilenet_v1 import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV1ImageProcessor


class MobileNetV1ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetV1ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV1Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV1Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV1ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileNetV1ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MobileNetV1Model, MobileNetV1ForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV1ModelTester(self)
        self.config_tester = MobileNetV1ConfigTester(self, config_class=MobileNetV1Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV1Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetV1ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
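
# Shape sketch: the model-test assertions above imply the backbone output is
# [batch, channels, image_size // output_stride, image_size // output_stride].
# With the tester defaults (illustrative arithmetic, not a new test):
#
#     batch_size, image_size, output_stride = 13, 32, 32
#     last_hidden_size = int(1024 * 0.25)  # 256, scaled by depth_multiplier
#     # expected last_hidden_state shape -> (13, 256, 1, 1)
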
import os
from pathlib import Path
from unittest.mock import patch

import pytest
import zstandard as zstd

from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
    OfflineModeIsEnabled,
    cached_path,
    fsspec_get,
    fsspec_head,
    ftp_get,
    ftp_head,
    get_from_cache,
    http_get,
    http_head,
)


FILE_CONTENT = """\
    Text data.
    Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
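
# Usage sketch: the pattern exercised by the extraction tests above, assuming
# `archive_path` points at a local gzip/xz/zstd file.
#
#     download_config = DownloadConfig(cache_dir="cache", extract_compressed_file=True)
#     extracted = cached_path(archive_path, download_config=download_config)
#     # `extracted` is the decompressed copy inside the cache directory
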
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
SCREAMING_SNAKE_CASE__ : Union[str, Any] = re.compile(r"\b(a|an|the)\b", re.UNICODE)
SCREAMING_SNAKE_CASE__ : int = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
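# Worked example (illustrative; not part of the original script):
#   compute_exact("the cat sat", "cat sat down")  -> 0
#   compute_f1("the cat sat", "cat sat down")     -> 0.8
# Normalization drops the article "the", leaving gold tokens [cat, sat] and
# predicted tokens [cat, sat, down]: precision = 2/3, recall = 2/2 = 1.0,
# so F1 = 2 * (2/3 * 1.0) / (2/3 + 1.0) = 0.8.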
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
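# Illustrative invocation (added for clarity; not part of the original script,
# and the script file name is an assumption):
#
#   python evaluate-v2.0.py dev-v2.0.json predictions.json \
#       --na-prob-file na_probs.json --out-file eval.json
#
# This writes a JSON dict with overall "exact"/"f1" scores, per-subset
# "HasAns_*" and "NoAns_*" metrics, and, when --na-prob-file is given, the best
# no-answer thresholds found by find_all_best_thresh above.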
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
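# Illustrative note (added for clarity; not part of the original module): with the
# lazy structure above, heavyweight backends are imported only when first needed.
# For example, under a normal (non-TYPE_CHECKING) import:
#
#   from transformers import BlenderbotConfig                    # no torch import yet
#   from transformers import BlenderbotForConditionalGeneration  # triggers the torch-backed submodule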
from __future__ import annotations
import os
from typing import Any
import requests
_UpperCAmelCase : int = """https://api.github.com"""
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
_UpperCAmelCase : Dict = BASE_URL + """/user"""
# https://github.com/settings/tokens
_UpperCAmelCase : Optional[Any] = os.environ.get("""USER_TOKEN""", """""")
def __lowerCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
snake_case_ = {
'Authorization': F'''token {auth_token}''',
'Accept': 'application/vnd.github.v3+json',
}
return requests.get(UpperCamelCase__ , headers=UpperCamelCase__ ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'''{key}: {value}''')
else:
raise ValueError("""'USER_TOKEN' field cannot be empty.""")
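# Illustrative invocation (added for clarity; not part of the original script;
# the file name and token value are assumptions):
#
#   USER_TOKEN=ghp_xxxxxxxxxxxxxxxx python fetch_github_info.py
#
# which prints one "key: value" line per field of the authenticated-user
# response (login, id, name, ...).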
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to the image processor,
        assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
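# Worked example (illustrative, added for clarity): with the default size
# {"shortest_edge": 18, "longest_edge": 1333}, a PIL image of width 40 and
# height 30 hits the w > h branch above, so expected_height = 18 and
# expected_width = int(18 * 40 / 30) = 24: the short side is scaled to the
# shortest edge and the aspect ratio is preserved.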
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_lowerCamelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, do_convert_rgb=True, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, do_convert_rgb=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
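# Illustrative usage sketch (added for clarity; not part of the original module;
# the class name and checkpoint are assumptions based on the CLIP-style defaults above):
#
#   from PIL import Image
#
#   processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   batch = processor(images=Image.open("cat.png"), return_tensors="pt")
#   # batch["pixel_values"] has shape (1, 3, 224, 224): short edge resized to 224,
#   # center-cropped to 224x224, rescaled by 1/255 and normalized with CLIP mean/std.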
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ["""MobileViTFeatureExtractor"""]
_lowerCamelCase = ["""MobileViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()
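# Worked example (illustrative; not part of the original script): right-padding
# two span lists to sequence_length 3 with the tuple fill value (-1, -1):
#
#   padding_tensor([[(0, 2)], [(0, 1), (2, 4)]], (-1, -1), "right", 3)
#   -> [[[0, 2], [-1, -1], [-1, -1]],
#       [[0, 1], [2, 4], [-1, -1]]]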
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings, specifically avoiding whitespace/control
    characters that the BPE code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
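# Illustrative note (added for clarity; not part of the original module): the
# mapping is a bijection from all 256 byte values to printable unicode
# characters, so byte-level BPE can treat raw bytes as ordinary string symbols:
#
#   byte_encoder = bytes_to_unicode()
#   byte_encoder[ord("A")]  -> "A"   # printable bytes map to themselves
#   byte_encoder[ord(" ")]  -> "Ġ"   # space (byte 32) is shifted to chr(256 + 32)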
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in a word, where the word is a tuple of
    variable-length string symbols.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
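    # Worked example (illustrative, added for clarity): if bpe_ranks contains the
    # merges ("l", "o") then ("lo", "w"), bpe("low") rewrites the symbol tuple
    # ("l", "o", "w") -> ("lo", "w") -> ("low",) and returns "low"; with no
    # applicable merges the characters are returned space-joined, e.g. "l o w".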
    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
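# Illustrative usage sketch (added for clarity; not part of the original module):
#
#   tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
#   enc = tokenizer("Hello world")
#   # enc["input_ids"] starts with <s> (bos) and ends with </s> (eos), exactly as
#   # assembled by build_inputs_with_special_tokens above.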
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def __lowerCamelCase ( self : Optional[int] ) ->List[str]:
lowerCamelCase__ : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ : List[str] = self.get_dummy_components()
lowerCamelCase__ : Optional[Any] = EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' )
lowerCamelCase__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline(**A )
lowerCamelCase__ : Any = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase__ : List[Any] = self.get_dummy_inputs(A )
lowerCamelCase__ : int = sd_pipe(**A ).images
lowerCamelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCamelCase__ : List[str] = [round(x , 4 ) for x in image_slice.flatten().tolist()]
print(''','''.join([str(x ) for x in slice] ) )
assert image.shape == (1, 3_2, 3_2, 3)
lowerCamelCase__ : List[Any] = np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __lowerCamelCase ( self : Optional[int] ) ->Dict:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def __lowerCamelCase ( self : Dict ) ->Optional[int]:
lowerCamelCase__ : Any = self.get_dummy_components()
lowerCamelCase__ : str = StableDiffusionInstructPixaPixPipeline(**A )
lowerCamelCase__ : Dict = VaeImageProcessor(do_resize=A , do_normalize=A )
lowerCamelCase__ : Tuple = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
lowerCamelCase__ : int = pipe(**self.get_dummy_inputs_by_type(A , input_image_type='''pt''' ) )[0]
lowerCamelCase__ : int = components['''vae''']
lowerCamelCase__ : Any = self.get_dummy_inputs_by_type(A , input_image_type='''pt''' )
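# Replace each image input with its pre-computed VAE latents and check that the pipeline output stays the same.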
for image_param in self.image_latents_params:
if image_param in inputs.keys():
lowerCamelCase__ : List[Any] = vae.encode(inputs[image_param] ).latent_dist.mode()
lowerCamelCase__ : Union[str, Any] = pipe(**A )[0]
lowerCamelCase__ : Any = np.abs(out - out_latents_inputs ).max()
self.assertLess(A , 1e-4 , '''passing latents as image input generate different result from passing image''' )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCamelCase ( self : List[Any] ) ->Dict:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self : str , A : Optional[int]=0 ) ->Dict:
lowerCamelCase__ : Tuple = torch.manual_seed(A )
lowerCamelCase__ : Tuple = load_image(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' )
lowerCamelCase__ : int = {
'''prompt''': '''turn him into a cyborg''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''image_guidance_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def __lowerCamelCase ( self : Dict ) ->int:
lowerCamelCase__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase__ : Tuple = self.get_inputs()
lowerCamelCase__ : int = pipe(**A ).images
lowerCamelCase__ : int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : Tuple = np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def __lowerCamelCase ( self : List[str] ) ->List[Any]:
lowerCamelCase__ : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=A )
lowerCamelCase__ : Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase__ : List[str] = self.get_inputs()
lowerCamelCase__ : int = pipe(**A ).images
lowerCamelCase__ : Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : List[str] = np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def __lowerCamelCase ( self : Optional[Any] ) ->List[Any]:
lowerCamelCase__ : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=A )
lowerCamelCase__ : Any = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase__ : Tuple = self.get_inputs()
lowerCamelCase__ : Any = pipe(**A ).images
lowerCamelCase__ : List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : Optional[Any] = np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 0.38_47, 0.37_53] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def __lowerCamelCase ( self : List[Any] ) ->List[Any]:
lowerCamelCase__ : str = 0
def callback_fn(step : int , timestep : int , latents : torch.FloatTensor ) -> None:
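# Record that the callback fired and spot-check the intermediate latents at steps 1 and 2.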
lowerCamelCase__ : Optional[Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowerCamelCase__ : int = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
lowerCamelCase__ : Any = latents[0, -3:, -3:, -1]
lowerCamelCase__ : Tuple = np.array([-0.24_63, -0.46_44, -0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowerCamelCase__ : Union[str, Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
lowerCamelCase__ : List[str] = latents[0, -3:, -3:, -1]
lowerCamelCase__ : int = np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
lowerCamelCase__ : Union[str, Any] = False
lowerCamelCase__ : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=A , torch_dtype=torch.floataa )
lowerCamelCase__ : int = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase__ : Optional[int] = self.get_inputs()
pipe(**A , callback=A , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __lowerCamelCase ( self : Any ) ->List[str]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase__ : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=A , torch_dtype=torch.floataa )
lowerCamelCase__ : List[str] = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCamelCase__ : Tuple = self.get_inputs()
lowerCamelCase__ : List[Any] = pipe(**A )
lowerCamelCase__ : Dict = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
def __lowerCamelCase ( self : str ) ->Optional[int]:
lowerCamelCase__ : Union[str, Any] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCamelCase__ : Any = inputs['''image'''].resize((5_0_4, 5_0_4) )
lowerCamelCase__ : Dict = '''timbrooks/instruct-pix2pix'''
lowerCamelCase__ : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
A , safety_checker=A , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase__ : List[str] = pipe(**A )
lowerCamelCase__ : List[str] = output.images[0]
lowerCamelCase__ : Tuple = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
lowerCamelCase__ : str = np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
| 350
|
import pprint
import requests
API_ENDPOINT_URL = 'https://zenquotes.io/api'
def quote_of_the_day() -> list:
    """Fetch today's quote of the day from the ZenQuotes API."""
    return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def random_quotes() -> list:
    """Fetch a random quote from the ZenQuotes API."""
    return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
| 265
| 0
|
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class __lowerCAmelCase :
@property
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
return self.get_dummy_input()
@property
def _lowercase ( self ) -> Any:
'''simple docstring'''
if self.block_type == "down":
return (4, 3_2, 1_6, 1_6)
elif self.block_type == "mid":
return (4, 3_2, 3_2, 3_2)
elif self.block_type == "up":
return (4, 3_2, 6_4, 6_4)
raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def _lowercase ( self , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=False , ) -> List[str]:
'''simple docstring'''
a__ : List[str] =4
a__ : str =3_2
a__ : Dict =(3_2, 3_2)
a__ : Optional[int] =torch.manual_seed(0 )
a__ : List[Any] =torch.device(lowerCAmelCase__ )
a__ : Union[str, Any] =(batch_size, num_channels) + sizes
a__ : Dict =randn_tensor(lowerCAmelCase__ , generator=lowerCAmelCase__ , device=lowerCAmelCase__ )
a__ : Optional[int] ={"hidden_states": hidden_states}
if include_temb:
a__ : List[Any] =1_2_8
a__ : List[str] =randn_tensor((batch_size, temb_channels) , generator=lowerCAmelCase__ , device=lowerCAmelCase__ )
if include_res_hidden_states_tuple:
a__ : Optional[int] =torch.manual_seed(1 )
a__ : Tuple =(randn_tensor(lowerCAmelCase__ , generator=lowerCAmelCase__ , device=lowerCAmelCase__ ),)
if include_encoder_hidden_states:
a__ : Dict =floats_tensor((batch_size, 3_2, 3_2) ).to(lowerCAmelCase__ )
if include_skip_sample:
a__ : Dict =randn_tensor(((batch_size, 3) + sizes) , generator=lowerCAmelCase__ , device=lowerCAmelCase__ )
return dummy_input
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ : Optional[int] ={
"in_channels": 3_2,
"out_channels": 3_2,
"temb_channels": 1_2_8,
}
if self.block_type == "up":
a__ : Any =3_2
if self.block_type == "mid":
init_dict.pop("out_channels" )
a__ : Optional[int] =self.dummy_input
return init_dict, inputs_dict
def _lowercase ( self , lowerCAmelCase__ ) -> str:
'''simple docstring'''
a__ , a__ : Union[str, Any] =self.prepare_init_args_and_inputs_for_common()
a__ : Dict =self.block_class(**lowerCAmelCase__ )
unet_block.to(lowerCAmelCase__ )
unet_block.eval()
with torch.no_grad():
a__ : List[Any] =unet_block(**lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
a__ : str =output[0]
self.assertEqual(output.shape , self.output_shape )
a__ : int =output[0, -1, -3:, -3:]
a__ : Any =torch.tensor(lowerCAmelCase__ ).to(lowerCAmelCase__ )
assert torch_all_close(output_slice.flatten() , lowerCAmelCase__ , atol=5E-3 )
@unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
a__ , a__ : Dict =self.prepare_init_args_and_inputs_for_common()
a__ : Any =self.block_class(**lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.train()
a__ : int =model(**lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
a__ : List[str] =output[0]
a__ : List[Any] =torch.device(lowerCAmelCase__ )
a__ : List[str] =randn_tensor(output.shape , device=lowerCAmelCase__ )
a__ : List[Any] =torch.nn.functional.mse_loss(lowerCAmelCase__ , lowerCAmelCase__ )
loss.backward()
| 95
|
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """
    Map each input value into the range (0, 1) via the logistic function 1 / (1 + e^-x).

    >>> sigmoid(np.array([0.0]))
    array([0.5])
    """
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 95
| 1
|
'''simple docstring'''
def perfect_cube(n: int) -> bool:
    """Return True if `n` is a perfect cube, e.g. 27 == 3 ** 3."""
    # Round the real cube root to avoid floating-point error (27 ** (1 / 3) == 3.0000000000000004).
    val = round(n ** (1 / 3))
    return val ** 3 == n
if __name__ == "__main__":
    print(perfect_cube(27))  # True
    print(perfect_cube(4))  # False
| 365
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _snake_case (metaclass=__SCREAMING_SNAKE_CASE):
__A : Union[str, Any] =["torch", "torchsde"]
def __init__( self ,*_snake_case ,**_snake_case ):
requires_backends(self ,["torch", "torchsde"] )
@classmethod
def UpperCamelCase__ ( cls ,*_snake_case ,**_snake_case ):
requires_backends(cls ,["torch", "torchsde"] )
@classmethod
def UpperCamelCase__ ( cls ,*_snake_case ,**_snake_case ):
requires_backends(cls ,["torch", "torchsde"] )
| 67
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCAmelCase__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = ['pixel_values']
def __init__( self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , lowercase = True , **lowercase , ) -> None:
'''simple docstring'''
super().__init__(**lowercase )
A__ = size if size is not None else {"shortest_edge": 224}
A__ = get_size_dict(lowercase , default_to_square=lowercase )
A__ = crop_size if crop_size is not None else {"height": 224, "width": 224}
A__ = get_size_dict(lowercase , default_to_square=lowercase , param_name="crop_size" )
A__ = do_resize
A__ = size
A__ = resample
A__ = do_center_crop
A__ = crop_size
A__ = do_rescale
A__ = rescale_factor
A__ = do_normalize
A__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A__ = image_std if image_std is not None else OPENAI_CLIP_STD
A__ = do_convert_rgb
def UpperCamelCase ( self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ) -> np.ndarray:
'''simple docstring'''
A__ = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A__ = get_resize_output_image_size(lowercase , size=size["shortest_edge"] , default_to_square=lowercase )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def UpperCamelCase ( self , lowercase , lowercase , lowercase = None , **lowercase , ) -> np.ndarray:
'''simple docstring'''
A__ = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(lowercase , size=(size["height"], size["width"]) , data_format=lowercase , **lowercase )
def UpperCamelCase ( self , lowercase , lowercase , lowercase = None , **lowercase , ) -> int:
'''simple docstring'''
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def UpperCamelCase ( self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ) -> PIL.Image.Image:
'''simple docstring'''
A__ = do_resize if do_resize is not None else self.do_resize
A__ = size if size is not None else self.size
A__ = get_size_dict(lowercase , param_name="size" , default_to_square=lowercase )
A__ = resample if resample is not None else self.resample
A__ = do_center_crop if do_center_crop is not None else self.do_center_crop
A__ = crop_size if crop_size is not None else self.crop_size
A__ = get_size_dict(lowercase , param_name="crop_size" , default_to_square=lowercase )
A__ = do_rescale if do_rescale is not None else self.do_rescale
A__ = rescale_factor if rescale_factor is not None else self.rescale_factor
A__ = do_normalize if do_normalize is not None else self.do_normalize
A__ = image_mean if image_mean is not None else self.image_mean
A__ = image_std if image_std is not None else self.image_std
A__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A__ = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A__ = [convert_to_rgb(lowercase ) for image in images]
# All transformations expect numpy arrays.
A__ = [to_numpy_array(lowercase ) for image in images]
if do_resize:
A__ = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
if do_center_crop:
A__ = [self.center_crop(image=lowercase , size=lowercase ) for image in images]
if do_rescale:
A__ = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_normalize:
A__ = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
A__ = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
A__ = {"pixel_values": images}
return BatchFeature(data=lowercase , tensor_type=lowercase )
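# A minimal usage sketch (hypothetical, assuming this class mirrors transformers' CLIPImageProcessor
# and that BaseImageProcessor.__call__ dispatches to preprocess):
#     processor = a__(size={"shortest_edge": 224})
#     batch = processor(images=pil_image, return_tensors="pt")
#     batch["pixel_values"].shape  # -> (1, 3, 224, 224)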
| 68
|
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: int ) -> int:
'''simple docstring'''
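# L2-normalize both embedding matrices, then matrix-multiply to get the pairwise cosine-similarity matrix.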
A__ = nn.functional.normalize(SCREAMING_SNAKE_CASE_ )
A__ = nn.functional.normalize(SCREAMING_SNAKE_CASE_ )
return torch.mm(SCREAMING_SNAKE_CASE_ , normalized_text_embeds.t() )
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = CLIPConfig
__lowerCamelCase = ['CLIPEncoderLayer']
def __init__( self , lowercase ) -> Optional[int]:
'''simple docstring'''
super().__init__(lowercase )
A__ = CLIPVisionModel(config.vision_config )
A__ = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=lowercase )
A__ = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=lowercase )
A__ = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=lowercase )
A__ = nn.Parameter(torch.ones(17 ) , requires_grad=lowercase )
A__ = nn.Parameter(torch.ones(3 ) , requires_grad=lowercase )
@torch.no_grad()
def UpperCamelCase ( self , lowercase , lowercase ) -> Any:
'''simple docstring'''
A__ = self.vision_model(lowercase )[1] # pooled_output
A__ = self.visual_projection(lowercase )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A__ = cosine_distance(lowercase , self.special_care_embeds ).cpu().float().numpy()
A__ = cosine_distance(lowercase , self.concept_embeds ).cpu().float().numpy()
A__ = []
A__ = image_embeds.shape[0]
for i in range(lowercase ):
A__ = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
A__ = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
A__ = special_cos_dist[i][concept_idx]
A__ = self.special_care_embeds_weights[concept_idx].item()
A__ = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]} )
A__ = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
A__ = cos_dist[i][concept_idx]
A__ = self.concept_embeds_weights[concept_idx].item()
A__ = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(lowercase )
result.append(lowercase )
A__ = [len(res["bad_concepts"] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def UpperCamelCase ( self , lowercase , lowercase ) -> Any:
'''simple docstring'''
A__ = self.vision_model(lowercase )[1] # pooled_output
A__ = self.visual_projection(lowercase )
A__ = cosine_distance(lowercase , self.special_care_embeds )
A__ = cosine_distance(lowercase , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
A__ = 0.0
A__ = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
A__ = torch.any(special_scores > 0 , dim=1 )
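# Images flagged for special care get a small constant added to every concept score below.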
A__ = special_care * 0.01
A__ = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
A__ = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
A__ = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
| 68
| 1
|
"""simple docstring"""
import base64
def base64_encode(string: str) -> bytes:
    """Encode a UTF-8 string as Base64 bytes."""
    return base64.b64encode(string.encode("utf-8") )
def base64_decode(encoded: bytes) -> str:
    """Decode Base64 bytes back into a UTF-8 string."""
    return base64.b64decode(encoded ).decode("utf-8")
if __name__ == "__main__":
    test = "Hello World!"
    encoded = base64_encode(test)
    print(encoded)
    decoded = base64_decode(encoded)
    print(decoded)
| 310
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
for attribute in key.split("." ):
_UpperCamelCase : str = getattr(lowercase_ ,lowercase_ )
if weight_type is not None:
_UpperCamelCase : str = getattr(lowercase_ ,lowercase_ ).shape
else:
_UpperCamelCase : int = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
_UpperCamelCase : Optional[Any] = value
elif weight_type == "weight_g":
_UpperCamelCase : int = value
elif weight_type == "weight_v":
_UpperCamelCase : Optional[Any] = value
elif weight_type == "bias":
_UpperCamelCase : int = value
else:
_UpperCamelCase : Any = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : List[str] = []
_UpperCamelCase : Any = fairseq_model.state_dict()
_UpperCamelCase : Union[str, Any] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_UpperCamelCase : List[str] = False
if "conv_layers" in name:
load_conv_layer(
lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,hf_model.config.feat_extract_norm == "group" ,)
_UpperCamelCase : Union[str, Any] = True
else:
for key, mapped_key in MAPPING.items():
_UpperCamelCase : Dict = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
_UpperCamelCase : Any = True
if "*" in mapped_key:
_UpperCamelCase : Dict = name.split(lowercase_ )[0].split("." )[-2]
_UpperCamelCase : Any = mapped_key.replace("*" ,lowercase_ )
if "weight_g" in name:
_UpperCamelCase : str = "weight_g"
elif "weight_v" in name:
_UpperCamelCase : Any = "weight_v"
elif "weight" in name:
_UpperCamelCase : List[str] = "weight"
elif "bias" in name:
_UpperCamelCase : List[Any] = "bias"
else:
_UpperCamelCase : str = None
set_recursively(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ )
continue
if not is_used:
unused_weights.append(lowercase_ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Any = full_name.split("conv_layers." )[-1]
_UpperCamelCase : Optional[Any] = name.split("." )
_UpperCamelCase : Union[str, Any] = int(items[0] )
_UpperCamelCase : Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
_UpperCamelCase : Union[str, Any] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
_UpperCamelCase : Tuple = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
_UpperCamelCase : List[str] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
_UpperCamelCase : int = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowercase_ )
def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : Dict = SEWConfig()
if is_finetuned:
_UpperCamelCase : Dict = model.wav_encoder.wav_model.cfg
else:
_UpperCamelCase : List[Any] = model.cfg
_UpperCamelCase : Any = fs_config.conv_bias
_UpperCamelCase : str = eval(fs_config.conv_feature_layers )
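# conv_feature_layers is stored as a string such as "[(512, 10, 5), (512, 3, 2), ...]";
# eval() turns it back into a list of (dim, kernel, stride) triples.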
_UpperCamelCase : Any = [x[0] for x in conv_layers]
_UpperCamelCase : List[Any] = [x[1] for x in conv_layers]
_UpperCamelCase : Union[str, Any] = [x[2] for x in conv_layers]
_UpperCamelCase : str = "gelu"
_UpperCamelCase : List[str] = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
_UpperCamelCase : Optional[int] = 0.0
_UpperCamelCase : Dict = fs_config.activation_fn.name
_UpperCamelCase : Any = fs_config.encoder_embed_dim
_UpperCamelCase : Optional[Any] = 0.02
_UpperCamelCase : str = fs_config.encoder_ffn_embed_dim
_UpperCamelCase : int = 1e-5
_UpperCamelCase : Optional[int] = fs_config.encoder_layerdrop
_UpperCamelCase : str = fs_config.encoder_attention_heads
_UpperCamelCase : Tuple = fs_config.conv_pos_groups
_UpperCamelCase : List[str] = fs_config.conv_pos
_UpperCamelCase : Optional[int] = len(lowercase_ )
_UpperCamelCase : Union[str, Any] = fs_config.encoder_layers
_UpperCamelCase : Union[str, Any] = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
_UpperCamelCase : List[str] = model.cfg
_UpperCamelCase : List[str] = fs_config.final_dropout
_UpperCamelCase : Optional[Any] = fs_config.layerdrop
_UpperCamelCase : int = fs_config.activation_dropout
_UpperCamelCase : int = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
_UpperCamelCase : int = fs_config.attention_dropout
_UpperCamelCase : int = fs_config.dropout_input
_UpperCamelCase : List[Any] = fs_config.dropout
_UpperCamelCase : List[Any] = fs_config.mask_channel_length
_UpperCamelCase : List[str] = fs_config.mask_channel_prob
_UpperCamelCase : Optional[Any] = fs_config.mask_length
_UpperCamelCase : Optional[int] = fs_config.mask_prob
_UpperCamelCase : List[str] = "Wav2Vec2FeatureExtractor"
_UpperCamelCase : Optional[Any] = "Wav2Vec2CTCTokenizer"
return config
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_=True ) -> str:
"""simple docstring"""
if is_finetuned:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
_UpperCamelCase : str = SEWConfig.from_pretrained(lowercase_ )
else:
_UpperCamelCase : Optional[int] = convert_config(model[0] ,lowercase_ )
_UpperCamelCase : List[str] = model[0].eval()
_UpperCamelCase : Union[str, Any] = True if config.feat_extract_norm == "layer" else False
_UpperCamelCase : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=lowercase_ ,return_attention_mask=lowercase_ ,)
if is_finetuned:
if dict_path:
_UpperCamelCase : Union[str, Any] = Dictionary.load(lowercase_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_UpperCamelCase : List[str] = target_dict.pad_index
_UpperCamelCase : Optional[int] = target_dict.bos_index
_UpperCamelCase : Any = target_dict.pad_index
_UpperCamelCase : List[Any] = target_dict.bos_index
_UpperCamelCase : List[str] = target_dict.eos_index
_UpperCamelCase : Optional[Any] = len(target_dict.symbols )
_UpperCamelCase : List[Any] = os.path.join(lowercase_ ,"vocab.json" )
if not os.path.isdir(lowercase_ ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowercase_ ) )
return
os.makedirs(lowercase_ ,exist_ok=lowercase_ )
with open(lowercase_ ,"w" ,encoding="utf-8" ) as vocab_handle:
json.dump(target_dict.indices ,lowercase_ )
_UpperCamelCase : Optional[Any] = WavaVecaCTCTokenizer(
lowercase_ ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="|" ,do_lower_case=lowercase_ ,)
_UpperCamelCase : List[str] = WavaVecaProcessor(feature_extractor=lowercase_ ,tokenizer=lowercase_ )
processor.save_pretrained(lowercase_ )
_UpperCamelCase : List[Any] = SEWForCTC(lowercase_ )
else:
_UpperCamelCase : int = SEWModel(lowercase_ )
feature_extractor.save_pretrained(lowercase_ )
recursively_load_weights(lowercase_ ,lowercase_ ,lowercase_ )
hf_model.save_pretrained(lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
lowerCamelCase__ = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 310
| 1
|
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = tmp_path / 'cache'
lowercase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowercase = SqlDatasetReader(
'dataset' , 'sqlite:///' + sqlite_path , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read()
_check_sql_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@require_sqlalchemy
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = tmp_path / 'cache'
lowercase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
lowercase = features.copy() if features else default_expected_features
lowercase = (
Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
lowercase = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read()
_check_sql_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
with contextlib.closing(sqlitea.connect(__SCREAMING_SNAKE_CASE ) ) as con:
lowercase = con.cursor()
cur.execute('SELECT * FROM dataset' )
for row in cur:
yield row
@require_sqlalchemy
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = tmp_path / 'cache'
lowercase = os.path.join(__SCREAMING_SNAKE_CASE , 'tmp.sql' )
lowercase = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=__SCREAMING_SNAKE_CASE ).read()
SqlDatasetWriter(__SCREAMING_SNAKE_CASE , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=1 ).write()
lowercase = iter_sql_file(__SCREAMING_SNAKE_CASE )
lowercase = iter_sql_file(__SCREAMING_SNAKE_CASE )
for rowa, rowa in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
assert rowa == rowa
@require_sqlalchemy
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = tmp_path / 'cache'
lowercase = os.path.join(__SCREAMING_SNAKE_CASE , 'tmp.sql' )
lowercase = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=__SCREAMING_SNAKE_CASE ).read()
SqlDatasetWriter(__SCREAMING_SNAKE_CASE , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=2 ).write()
lowercase = iter_sql_file(__SCREAMING_SNAKE_CASE )
lowercase = iter_sql_file(__SCREAMING_SNAKE_CASE )
for rowa, rowa in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
assert rowa == rowa
@require_sqlalchemy
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = tmp_path / 'cache'
lowercase = os.path.join(__SCREAMING_SNAKE_CASE , 'tmp.sql' )
lowercase = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=__SCREAMING_SNAKE_CASE ).read()
with pytest.raises(__SCREAMING_SNAKE_CASE ):
SqlDatasetWriter(__SCREAMING_SNAKE_CASE , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=0 ).write()
| 195
|
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
UpperCAmelCase = logging.getLogger(__name__)
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = """sequence-classification"""
def __init__( self , snake_case ):
if type(snake_case ) == dict:
lowercase = Namespace(**snake_case )
lowercase = glue_output_modes[hparams.task]
lowercase = glue_tasks_num_labels[hparams.task]
super().__init__(snake_case , snake_case , self.mode )
def SCREAMING_SNAKE_CASE__ ( self , **snake_case ):
return self.model(**snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
lowercase = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowercase = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
lowercase = self(**snake_case )
lowercase = outputs[0]
lowercase = self.trainer.lr_schedulers[0]['scheduler']
lowercase = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.hparams
lowercase = processors[args.task]()
lowercase = processor.get_labels()
for mode in ["train", "dev"]:
lowercase = self._feature_file(snake_case )
if os.path.exists(snake_case ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , snake_case )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
lowercase = (
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
lowercase = convert_examples_to_features(
snake_case , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , snake_case )
torch.save(snake_case , snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case = False ):
lowercase = 'dev' if mode == 'test' else mode
lowercase = self._feature_file(snake_case )
logger.info('Loading features from cached file %s' , snake_case )
lowercase = torch.load(snake_case )
lowercase = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowercase = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
lowercase = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
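# GLUE tasks are either classification (integer labels) or regression (float labels).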
if self.hparams.glue_output_mode == "classification":
lowercase = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
lowercase = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(snake_case , snake_case , snake_case , snake_case ) , batch_size=snake_case , shuffle=snake_case , )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
lowercase = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowercase = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
lowercase = self(**snake_case )
lowercase , lowercase = outputs[:2]
lowercase = logits.detach().cpu().numpy()
lowercase = inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
lowercase = np.concatenate([x['pred'] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
lowercase = np.argmax(snake_case , axis=1 )
elif self.hparams.glue_output_mode == "regression":
lowercase = np.squeeze(snake_case )
lowercase = np.concatenate([x['target'] for x in outputs] , axis=0 )
lowercase = [[] for _ in range(out_label_ids.shape[0] )]
lowercase = [[] for _ in range(out_label_ids.shape[0] )]
lowercase = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , snake_case , snake_case )}
lowercase = dict(results.items() )
lowercase = results
return ret, preds_list, out_label_list
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase , lowercase , lowercase = self._eval_end(snake_case )
lowercase = ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase , lowercase , lowercase = self._eval_end(snake_case )
lowercase = ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case , snake_case ):
BaseTransformer.add_model_specific_args(snake_case , snake_case )
parser.add_argument(
'--max_seq_length' , default=128 , type=snake_case , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--task' , default='' , type=snake_case , required=snake_case , help='The GLUE task to run' , )
parser.add_argument(
'--gpus' , default=0 , type=snake_case , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
return parser
def UpperCAmelCase_ ( ):
lowercase = argparse.ArgumentParser()
add_generic_args(__SCREAMING_SNAKE_CASE , os.getcwd() )
lowercase = GLUETransformer.add_model_specific_args(__SCREAMING_SNAKE_CASE , os.getcwd() )
lowercase = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
lowercase = os.path.join(
'./results' , F'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
os.makedirs(args.output_dir )
lowercase = GLUETransformer(__SCREAMING_SNAKE_CASE )
lowercase = generic_train(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
lowercase = sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=__SCREAMING_SNAKE_CASE ) )
lowercase = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 195
| 1
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
UpperCAmelCase_ = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
UpperCAmelCase_ = {
'allenai/longformer-base-4096': 4_096,
'allenai/longformer-large-4096': 4_096,
'allenai/longformer-large-4096-finetuned-triviaqa': 4_096,
'allenai/longformer-base-4096-extra.pos.embd.only': 4_096,
'allenai/longformer-large-4096-extra.pos.embd.only': 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowerCamelCase__ ( ):
'''simple docstring'''
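# Build a reversible byte -> printable-unicode mapping so the BPE vocabulary never has to
# contain control characters or raw whitespace bytes.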
__lowerCamelCase = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
__lowerCamelCase = bs[:]
__lowerCamelCase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(A__ )
cs.append(2**8 + n )
n += 1
__lowerCamelCase = [chr(A__ ) for n in cs]
return dict(zip(A__ , A__ ) )
def lowerCamelCase__ ( A__ : Optional[Any] ):
'''simple docstring'''
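# Collect the set of adjacent symbol pairs in the word; these are the candidate BPE merges.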
__lowerCamelCase = set()
__lowerCamelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__lowerCamelCase = char
return pairs
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Optional[int] = VOCAB_FILES_NAMES
UpperCAmelCase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : int = ['input_ids', 'attention_mask']
def __init__( self: Tuple , UpperCamelCase_: List[str] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: List[str]="replace" , UpperCamelCase_: str="<s>" , UpperCamelCase_: Dict="</s>" , UpperCamelCase_: Optional[Any]="</s>" , UpperCamelCase_: Any="<s>" , UpperCamelCase_: Optional[Any]="<unk>" , UpperCamelCase_: int="<pad>" , UpperCamelCase_: Tuple="<mask>" , UpperCamelCase_: str=False , **UpperCamelCase_: Any , ):
__lowerCamelCase = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token
__lowerCamelCase = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token
__lowerCamelCase = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token
__lowerCamelCase = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token
__lowerCamelCase = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token
__lowerCamelCase = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__lowerCamelCase = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
super().__init__(
errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
with open(UpperCamelCase_ , encoding="""utf-8""" ) as vocab_handle:
__lowerCamelCase = json.load(UpperCamelCase_ )
__lowerCamelCase = {v: k for k, v in self.encoder.items()}
__lowerCamelCase = errors # how to handle errors in decoding
__lowerCamelCase = bytes_to_unicode()
__lowerCamelCase = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase_ , encoding="""utf-8""" ) as merges_handle:
__lowerCamelCase = merges_handle.read().split("""\n""" )[1:-1]
__lowerCamelCase = [tuple(merge.split() ) for merge in bpe_merges]
__lowerCamelCase = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__lowerCamelCase = {}
__lowerCamelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__lowerCamelCase = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def lowerCAmelCase__ ( self: Tuple ):
return len(self.encoder )
def lowerCAmelCase__ ( self: Optional[int] ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Any ):
if token in self.cache:
return self.cache[token]
__lowerCamelCase = tuple(UpperCamelCase_ )
__lowerCamelCase = get_pairs(UpperCamelCase_ )
if not pairs:
return token
while True:
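# Repeatedly apply the lowest-ranked (earliest-learned) merge until no mergeable pair remains.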
__lowerCamelCase = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
__lowerCamelCase, __lowerCamelCase = bigram
__lowerCamelCase = []
__lowerCamelCase = 0
while i < len(UpperCamelCase_ ):
try:
__lowerCamelCase = word.index(UpperCamelCase_ , UpperCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowerCamelCase = j
if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowerCamelCase = tuple(UpperCamelCase_ )
__lowerCamelCase = new_word
if len(UpperCamelCase_ ) == 1:
break
else:
__lowerCamelCase = get_pairs(UpperCamelCase_ )
__lowerCamelCase = """ """.join(UpperCamelCase_ )
__lowerCamelCase = word
return word
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: int ):
__lowerCamelCase = []
for token in re.findall(self.pat , UpperCamelCase_ ):
__lowerCamelCase = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase_ ).split(""" """ ) )
return bpe_tokens
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Any ):
return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Tuple ):
return self.decoder.get(UpperCamelCase_ )
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = """""".join(UpperCamelCase_ )
__lowerCamelCase = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__lowerCamelCase = os.path.join(
UpperCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__lowerCamelCase = os.path.join(
UpperCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(UpperCamelCase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + """\n""" )
__lowerCamelCase = 0
with open(UpperCamelCase_ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
__lowerCamelCase = token_index
writer.write(""" """.join(UpperCamelCase_ ) + """\n""" )
index += 1
return vocab_file, merge_file
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
__lowerCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ):
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Tuple , UpperCamelCase_: int=False , **UpperCamelCase_: Dict ):
__lowerCamelCase = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase_ ) > 0 and not text[0].isspace()):
__lowerCamelCase = """ """ + text
return (text, kwargs)
| 29
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class lowerCamelCase__( unittest.TestCase):
@parameterized.expand([(None,), ("""foo.json""",)] )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: List[str] ):
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCamelCase_ , config_name=UpperCamelCase_ )
__lowerCamelCase = GenerationConfig.from_pretrained(UpperCamelCase_ , config_name=UpperCamelCase_ )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , UpperCamelCase_ )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = AutoConfig.from_pretrained("""gpt2""" )
__lowerCamelCase = GenerationConfig.from_model_config(UpperCamelCase_ )
__lowerCamelCase = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_ )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = GenerationConfig()
__lowerCamelCase = {
"""max_new_tokens""": 10_24,
"""foo""": """bar""",
}
__lowerCamelCase = copy.deepcopy(UpperCamelCase_ )
__lowerCamelCase = generation_config.update(**UpperCamelCase_ )
# update_kwargs was not modified (no side effects)
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 10_24 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(UpperCamelCase_ , {"""foo""": """bar"""} )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = GenerationConfig()
__lowerCamelCase = """bar"""
with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
generation_config.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = GenerationConfig.from_pretrained(UpperCamelCase_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , """bar""" )
__lowerCamelCase = GenerationConfig.from_model_config(UpperCamelCase_ )
assert not hasattr(UpperCamelCase_ , """foo""" ) # no new kwargs should be initialized if from config
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , UpperCamelCase_ )
self.assertEqual(default_config.num_beams , 1 )
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , UpperCamelCase_ )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = GenerationConfig.from_pretrained(UpperCamelCase_ , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , UpperCamelCase_ )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
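# A hedged sketch of the save/load round trip the tests above exercise;
# assumes a recent `transformers` release (exact defaults can vary by
# version).
import tempfile

from transformers import GenerationConfig

config = GenerationConfig(do_sample=True, temperature=0.7, length_penalty=1.0)
with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)
    # kwargs passed to from_pretrained override the serialized values
    loaded = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)
assert loaded.temperature == 1.0 and loaded.do_sample is True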
@is_staging_test
class lowerCamelCase__( unittest.TestCase):
@classmethod
def lowerCAmelCase__ ( cls: Optional[Any] ):
__lowerCamelCase = TOKEN
HfFolder.save_token(UpperCamelCase_ )
@classmethod
def lowerCAmelCase__ ( cls: str ):
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained(F'{USER}/test-generation-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCamelCase_ , repo_id="""test-generation-config""" , push_to_hub=UpperCamelCase_ , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained(F'{USER}/test-generation-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCamelCase_ , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=UpperCamelCase_ , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
| 29
| 1
|
'''simple docstring'''
def selection_sort( collection: list ) -> list:
    """Sort a mutable list in place with selection sort and return it."""
    length = len(collection )
    for i in range(length - 1 ):
        least = i
        for k in range(i + 1, length ):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(selection_sort(unsorted))
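# Quick checks of selection_sort on a few inputs.
assert selection_sort([3, 1, 2]) == [1, 2, 3]
assert selection_sort([]) == []
assert selection_sort([-5, 10, 0]) == [-5, 0, 10]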
| 134
|
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
_SCREAMING_SNAKE_CASE : List[str] = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class A__ ( snake_case__ ):
"""simple docstring"""
def __init__( self , *__snake_case , __snake_case=None , __snake_case=None , __snake_case=None , **__snake_case ):
super().__init__(*__snake_case , **__snake_case )
snake_case = eval_examples
snake_case = post_process_function
snake_case = quant_trainer_args
snake_case = 1_2_8 # default number of calibration samples
def a_ ( self , __snake_case=None ):
if calib_dataset is None and self.calib_dataset is None:
raise ValueError('''Trainer: calibration requires a calib_dataset.''' )
snake_case = calib_dataset if calib_dataset is not None else self.calib_dataset
snake_case = self._remove_unused_columns(__snake_case , description='''Calibration''' )
return DataLoader(
__snake_case , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__snake_case , )
def a_ ( self , __snake_case=None ):
snake_case = self.train_dataset if calib_dataset is None else calib_dataset
snake_case = self.get_calib_dataloader(__snake_case )
snake_case = self.model
quant_trainer.configure_model(__snake_case , self.quant_trainer_args , calib=__snake_case )
model.eval()
quant_trainer.enable_calibration(__snake_case )
logger.info('''***** Running calibration *****''' )
logger.info(F''' Num examples = {self.calib_num}''' )
logger.info(F''' Batch size = {calib_dataloader.batch_size}''' )
for step, inputs in enumerate(__snake_case ):
# Prediction step
snake_case , snake_case , snake_case = self.prediction_step(__snake_case , __snake_case , prediction_loss_only=__snake_case )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(__snake_case , self.quant_trainer_args )
snake_case = model
def a_ ( self , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case = "eval" ):
snake_case = self.eval_dataset if eval_dataset is None else eval_dataset
snake_case = self.get_eval_dataloader(__snake_case )
snake_case = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
snake_case = self.compute_metrics
snake_case = None
snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
snake_case = eval_loop(
__snake_case , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__snake_case , )
finally:
snake_case = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
snake_case = self.post_process_function(__snake_case , __snake_case , output.predictions )
snake_case = self.compute_metrics(__snake_case )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
snake_case = metrics.pop(__snake_case )
self.log(__snake_case )
else:
snake_case = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
snake_case = self.callback_handler.on_evaluate(self.args , self.state , self.control , __snake_case )
return metrics
def a_ ( self , __snake_case , __snake_case , __snake_case=None , __snake_case = "test" ):
snake_case = self.get_test_dataloader(__snake_case )
# Temporarily disable metric computation, we will do it in the loop here.
snake_case = self.compute_metrics
snake_case = None
snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
snake_case = eval_loop(
__snake_case , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__snake_case , )
finally:
snake_case = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
snake_case = self.post_process_function(__snake_case , __snake_case , output.predictions , '''predict''' )
snake_case = self.compute_metrics(__snake_case )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
snake_case = metrics.pop(__snake_case )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__snake_case )
def a_ ( self , __snake_case="./" ):
snake_case = self.eval_dataset
snake_case = self.get_eval_dataloader(__snake_case )
snake_case = next(iter(__snake_case ) )
# saving device - to make it consistent
snake_case = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
# convert to tuple
snake_case = tuple(v.to(__snake_case ) for k, v in batch.items() )
logger.info('''Converting model to be onnx compatible''' )
from pytorch_quantization.nn import TensorQuantizer
snake_case = True
snake_case = self.model.to(__snake_case )
model.eval()
model.float()
snake_case = model.module if hasattr(__snake_case , '''module''' ) else model
quant_trainer.configure_model(__snake_case , self.quant_trainer_args )
snake_case = os.path.join(__snake_case , '''model.onnx''' )
logger.info(F'''exporting model to {output_model_file}''' )
snake_case = {0: '''batch_size''', 1: '''seq_len'''}
torch.onnx.export(
__snake_case , __snake_case , __snake_case , export_params=__snake_case , opset_version=1_3 , do_constant_folding=__snake_case , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={
'''input_ids''': axes,
'''attention_mask''': axes,
'''token_type_ids''': axes,
'''output_start_logits''': axes,
'''output_end_logits''': axes,
} , verbose=__snake_case , )
logger.info('''onnx export finished''' )
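# A hedged, minimal sketch of the dynamic-axes ONNX export pattern used in
# the method above, applied to a toy model (requires torch with ONNX export
# support; the file path and toy shapes are illustrative only).
import os
import tempfile

import torch

toy_model = torch.nn.Linear(4, 2)
dummy_input = torch.randn(1, 4)
onnx_path = os.path.join(tempfile.mkdtemp(), "toy.onnx")
torch.onnx.export(
    toy_model,
    (dummy_input,),
    onnx_path,
    input_names=["input"],
    output_names=["output"],
    # the batch dimension stays symbolic so any batch size works at runtime
    dynamic_axes={"input": {0: "batch_size"}, "output": {0: "batch_size"}},
    opset_version=13,
)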
| 127
| 0
|
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_a : List[str] = logging.get_logger(__name__)
_a : Optional[Any] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
_a : Tuple = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
_a : Dict = {'facebook/blenderbot_small-90M': 512}
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> Tuple:
_lowerCAmelCase : str = set()
_lowerCAmelCase : int = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCAmelCase : int = char
_lowerCAmelCase : Optional[Any] = set(pairs )
return pairs
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : int = VOCAB_FILES_NAMES
_UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[Any] = ["input_ids", "attention_mask"]
def __init__( self , a__ , a__ , a__="__start__" , a__="__end__" , a__="__unk__" , a__="__null__" , **a__ , ):
super().__init__(unk_token=a__ , bos_token=a__ , eos_token=a__ , pad_token=a__ , **a__ )
with open(a__ , encoding="""utf-8""" ) as vocab_handle:
_lowerCAmelCase : Tuple = json.load(a__ )
_lowerCAmelCase : int = {v: k for k, v in self.encoder.items()}
with open(a__ , encoding="""utf-8""" ) as merges_handle:
_lowerCAmelCase : int = merges_handle.read().split("""\n""" )[1:-1]
_lowerCAmelCase : int = [tuple(merge.split() ) for merge in merges]
_lowerCAmelCase : str = dict(zip(a__ , range(len(a__ ) ) ) )
_lowerCAmelCase : Union[str, Any] = {}
@property
def __A ( self ):
return len(self.encoder )
def __A ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def __A ( self , a__ ):
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : Optional[int] = re.sub("""([.,!?()])""" , r""" \1""" , a__ )
_lowerCAmelCase : Optional[int] = re.sub("""(')""" , r""" \1 """ , a__ )
_lowerCAmelCase : Union[str, Any] = re.sub(r"""\s{2,}""" , """ """ , a__ )
if "\n" in token:
_lowerCAmelCase : Dict = token.replace("""\n""" , """ __newln__""" )
_lowerCAmelCase : List[str] = token.split(""" """ )
_lowerCAmelCase : Any = []
for token in tokens:
if not len(a__ ):
continue
_lowerCAmelCase : str = token.lower()
_lowerCAmelCase : Optional[Any] = tuple(a__ )
_lowerCAmelCase : str = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
_lowerCAmelCase : Any = get_pairs(a__ )
if not pairs:
words.append(a__ )
continue
while True:
_lowerCAmelCase : Dict = min(a__ , key=lambda a__ : self.bpe_ranks.get(a__ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase : Any = bigram
_lowerCAmelCase : str = []
_lowerCAmelCase : List[Any] = 0
while i < len(a__ ):
try:
_lowerCAmelCase : List[str] = word.index(a__ , a__ )
new_word.extend(word[i:j] )
_lowerCAmelCase : List[Any] = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(a__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase : Optional[int] = tuple(a__ )
_lowerCAmelCase : str = new_word
if len(a__ ) == 1:
break
else:
_lowerCAmelCase : Tuple = get_pairs(a__ )
_lowerCAmelCase : Union[str, Any] = """@@ """.join(a__ )
_lowerCAmelCase : Optional[int] = word[:-4]
_lowerCAmelCase : List[str] = word
words.append(a__ )
return " ".join(a__ )
def __A ( self , a__ ):
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : int = re.findall(r"""\S+\n?""" , a__ )
for token in words:
split_tokens.extend(list(self.bpe(a__ ).split(""" """ ) ) )
return split_tokens
def __A ( self , a__ ):
_lowerCAmelCase : Union[str, Any] = token.lower()
return self.encoder.get(a__ , self.encoder.get(self.unk_token ) )
def __A ( self , a__ ):
return self.decoder.get(a__ , self.unk_token )
def __A ( self , a__ ):
_lowerCAmelCase : List[str] = """ """.join(a__ ).replace("""@@ """ , """""" ).strip()
return out_string
def __A ( self , a__ , a__ = None ):
if not os.path.isdir(a__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
_lowerCAmelCase : List[str] = os.path.join(
a__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase : Tuple = os.path.join(
a__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(a__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=a__ , ensure_ascii=a__ ) + """\n""" )
_lowerCAmelCase : Any = 0
with open(a__ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""" )
_lowerCAmelCase : int = token_index
writer.write(""" """.join(a__ ) + """\n""" )
index += 1
return vocab_file, merge_file
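# A standalone illustration of the get_pairs helper defined above: the set of
# adjacent symbol bigrams that BPE merge ranking operates on.
def get_pairs_demo(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

assert get_pairs_demo(("l", "o", "w</w>")) == {("l", "o"), ("o", "w</w>")}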
| 355
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 100 ) -> int:
_lowerCAmelCase : Optional[Any] = n * (n + 1) * (2 * n + 1) / 6
_lowerCAmelCase : Tuple = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F"""{solution() = }""")
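# Brute-force cross-check of the closed-form identity used above; 2640 is the
# Project Euler worked example for n = 10.
def solution_brute(n: int = 100) -> int:
    return sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))

assert solution_brute(10) == 2640
assert solution_brute(100) == 25164150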
| 126
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class __magic_name__ ( lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = 'mgp-str'
def __init__( self, lowercase_=[32, 128], lowercase_=4, lowercase_=3, lowercase_=27, lowercase_=38, lowercase_=50257, lowercase_=30522, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=4.0, lowercase_=True, lowercase_=False, lowercase_=1E-5, lowercase_=0.0, lowercase_=0.0, lowercase_=0.0, lowercase_=False, lowercase_=0.02, **lowercase_, ) -> List[Any]:
"""simple docstring"""
super().__init__(**lowercase_ )
a__ =image_size
a__ =patch_size
a__ =num_channels
a__ =max_token_length
a__ =num_character_labels
a__ =num_bpe_labels
a__ =num_wordpiece_labels
a__ =hidden_size
a__ =num_hidden_layers
a__ =num_attention_heads
a__ =mlp_ratio
a__ =distilled
a__ =layer_norm_eps
a__ =drop_rate
a__ =qkv_bias
a__ =attn_drop_rate
a__ =drop_path_rate
a__ =output_aa_attentions
a__ =initializer_range
| 188
|
from math import factorial
def UpperCAmelCase__ ( _A : int = 1_00 ):
'''simple docstring'''
return sum(int(_A ) for x in str(factorial(_A ) ) )
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
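# Sanity check of the digit-sum computation above: 10! = 3628800 has digit
# sum 27, and n = 100 gives the well-known Project Euler 20 answer, 648.
from math import factorial

assert sum(int(d) for d in str(factorial(10))) == 27
assert sum(int(d) for d in str(factorial(100))) == 648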
| 188
| 1
|
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 364
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 'facebook/nllb-200-distilled-600M'
__UpperCamelCase = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
'which should be the language for the desired output. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
__UpperCamelCase = 'translator'
__UpperCamelCase = AutoTokenizer
__UpperCamelCase = AutoModelForSeqaSeqLM
__UpperCamelCase = LANGUAGE_CODES
__UpperCamelCase = ['text', 'text', 'text']
__UpperCamelCase = ['text']
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if src_lang not in self.lang_to_code:
raise ValueError(f"""{src_lang} is not a supported language.""" )
if tgt_lang not in self.lang_to_code:
raise ValueError(f"""{tgt_lang} is not a supported language.""" )
_lowerCAmelCase = self.lang_to_code[src_lang]
_lowerCAmelCase = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
lowerCamelCase , return_tensors="""pt""" , src_lang=lowerCamelCase , tgt_lang=lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return self.model.generate(**lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowerCamelCase )
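# A hedged sketch of the plain-English-to-NLLB-code lookup the tool performs,
# with a trimmed-down mapping (the full table appears above).
LANG_TO_CODE = {"English": "eng_Latn", "French": "fra_Latn", "Romanian": "ron_Latn"}

def resolve_lang(name: str) -> str:
    if name not in LANG_TO_CODE:
        raise ValueError(f"{name} is not a supported language.")
    return LANG_TO_CODE[name]

assert resolve_lang("French") == "fra_Latn"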
| 317
| 0
|
'''simple docstring'''
from __future__ import annotations
def is_9_pandigital( lowerCamelCase_ ):
    """simple docstring"""
    candidate = str(lowerCamelCase_ )
    return len(candidate ) == 9 and set(candidate ) == set("""123456789""" )
def solution():
    """simple docstring"""
    for base_num in range(99_99 , 49_99 , -1 ):
        candidate = 10_00_02 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    for base_num in range(3_33 , 99 , -1 ):
        candidate = 1_00_20_03 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    return None
if __name__ == "__main__":
print(f'''{solution() = }''')
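# A quick check of the pandigital predicate above against the classic example
# 918273645, the concatenated product of 9 with (1, 2, 3, 4, 5).
digits = str(918273645)
assert len(digits) == 9 and set(digits) == set("123456789")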
| 323
|
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class UpperCamelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Dict[str, int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int = None , lowerCamelCase_ : int = None ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Any = pad_token_id
SCREAMING_SNAKE_CASE : List[Any] = max_length
SCREAMING_SNAKE_CASE : Optional[int] = vocab
SCREAMING_SNAKE_CASE : List[Any] = merges
SCREAMING_SNAKE_CASE : Tuple = BytePairTokenizer(lowerCamelCase_ , lowerCamelCase_ , sequence_length=lowerCamelCase_ )
@classmethod
def lowerCamelCase_ ( cls : Any , lowerCamelCase_ : GPTaTokenizer , *lowerCamelCase_ : str , **lowerCamelCase_ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = [""" """.join(lowerCamelCase_ ) for m in tokenizer.bpe_ranks.keys()]
SCREAMING_SNAKE_CASE : List[str] = tokenizer.get_vocab()
return cls(lowerCamelCase_ , lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
@classmethod
def lowerCamelCase_ ( cls : List[Any] , lowerCamelCase_ : Union[str, os.PathLike] , *lowerCamelCase_ : str , **lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = GPTaTokenizer.from_pretrained(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
return cls.from_tokenizer(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
@classmethod
def lowerCamelCase_ ( cls : List[str] , lowerCamelCase_ : Tuple ):
'''simple docstring'''
return cls(**lowerCamelCase_ )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : int = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.tf_tokenizer(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = tf.ones_like(lowerCamelCase_ )
if self.pad_token_id is not None:
# pad the tokens up to max length
SCREAMING_SNAKE_CASE : Optional[int] = max_length if max_length is not None else self.max_length
if max_length is not None:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = pad_model_inputs(
lowerCamelCase_ , max_seq_length=lowerCamelCase_ , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
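# A numpy stand-in for what pad_model_inputs does above: truncate or pad the
# token IDs to a fixed length and derive the matching attention mask. The
# real function comes from tensorflow_text; this sketch only mirrors its
# observable behavior.
import numpy as np

def pad_inputs(ids, max_seq_length, pad_value):
    ids = ids[:max_seq_length]
    mask = np.ones(len(ids), dtype=np.int32)
    pad = max_seq_length - len(ids)
    return (
        np.pad(ids, (0, pad), constant_values=pad_value),
        np.pad(mask, (0, pad), constant_values=0),
    )

ids, mask = pad_inputs(np.array([5, 6, 7]), max_seq_length=5, pad_value=0)
assert ids.tolist() == [5, 6, 7, 0, 0] and mask.tolist() == [1, 1, 1, 0, 0]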
| 323
| 1
|
"""simple docstring"""
class PrefixSum :
    def __init__( self ,array ):
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 ,len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum( self ,start ,end ):
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def contains_sum( self ,target_sum ):
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
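# Example usage of the prefix-sum class defined above.
ps = PrefixSum([1, 2, 3, 4])      # prefix sums: [1, 3, 6, 10]
assert ps.get_sum(1, 3) == 9      # 2 + 3 + 4
assert ps.contains_sum(6)         # contiguous subarray [1, 2, 3]
assert not ps.contains_sum(100)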
| 353
|
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = np.inf
def set_batch_size(lowerCAmelCase__ ) -> None:
nonlocal batch_size
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase = min(lowerCAmelCase__ , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase = min(lowerCAmelCase__ , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and feature.dtype == "binary":
lowercase = min(lowerCAmelCase__ , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(lowerCAmelCase__ , lowerCAmelCase__ )
return None if batch_size is np.inf else batch_size
class lowercase ( SCREAMING_SNAKE_CASE__ ):
def __init__( self ,A__ ,A__ = None ,A__ = None ,A__ = None ,A__ = False ,A__ = False ,A__ = None ,**A__ ,):
super().__init__(
A__ ,split=A__ ,features=A__ ,cache_dir=A__ ,keep_in_memory=A__ ,streaming=A__ ,num_proc=A__ ,**A__ ,)
lowercase = path_or_paths if isinstance(A__ ,A__) else {self.split: path_or_paths}
lowercase = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
lowercase = Parquet(
cache_dir=A__ ,data_files=A__ ,features=A__ ,hash=A__ ,**A__ ,)
def A__ ( self):
# Build iterable dataset
if self.streaming:
lowercase = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
lowercase = None
lowercase = None
lowercase = None
lowercase = None
self.builder.download_and_prepare(
download_config=A__ ,download_mode=A__ ,verification_mode=A__ ,base_path=A__ ,num_proc=self.num_proc ,)
lowercase = self.builder.as_dataset(
split=self.split ,verification_mode=A__ ,in_memory=self.keep_in_memory)
return dataset
class lowercase :
def __init__( self ,A__ ,A__ ,A__ = None ,**A__ ,):
lowercase = dataset
lowercase = path_or_buf
lowercase = batch_size or get_writer_batch_size(dataset.features)
lowercase = parquet_writer_kwargs
def A__ ( self):
lowercase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf ,(str, bytes, os.PathLike)):
with open(self.path_or_buf ,'''wb+''') as buffer:
lowercase = self._write(file_obj=A__ ,batch_size=A__ ,**self.parquet_writer_kwargs)
else:
lowercase = self._write(file_obj=self.path_or_buf ,batch_size=A__ ,**self.parquet_writer_kwargs)
return written
def A__ ( self ,A__ ,A__ ,**A__):
lowercase = 0
lowercase = parquet_writer_kwargs.pop('''path_or_buf''' ,A__)
lowercase = self.dataset.features.arrow_schema
lowercase = pq.ParquetWriter(A__ ,schema=A__ ,**A__)
for offset in logging.tqdm(
range(0 ,len(self.dataset) ,A__) ,unit='''ba''' ,disable=not logging.is_progress_bar_enabled() ,desc='''Creating parquet from Arrow format''' ,):
lowercase = query_table(
table=self.dataset._data ,key=slice(A__ ,offset + batch_size) ,indices=self.dataset._indices if self.dataset._indices is not None else None ,)
writer.write_table(A__)
written += batch.nbytes
writer.close()
return written
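# A minimal, hedged sketch of the batched ParquetWriter pattern used in the
# _write method above, with plain pyarrow (no `datasets` dependency; the path
# is a throwaway temp file).
import os
import tempfile

import pyarrow as pa
import pyarrow.parquet as pq

table = pa.table({"x": list(range(10))})
path = os.path.join(tempfile.mkdtemp(), "example.parquet")
with pq.ParquetWriter(path, schema=table.schema) as writer:
    for batch in table.to_batches(max_chunksize=4):  # write in small batches
        writer.write_table(pa.Table.from_batches([batch]))
assert pq.read_table(path).num_rows == 10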
| 97
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case_ : Tuple = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : int = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : str = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Dict = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Optional[int] = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
snake_case_ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 83
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class a__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _snake_case (self ):
__lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' , return_dict=__lowercase ).to(__lowercase )
__lowerCAmelCase = AutoTokenizer.from_pretrained('''google/mt5-small''' )
__lowerCAmelCase = tokenizer('''Hello there''' , return_tensors='''pt''' ).input_ids
__lowerCAmelCase = tokenizer('''Hi I am''' , return_tensors='''pt''' ).input_ids
__lowerCAmelCase = model(input_ids.to(__lowercase ) , labels=labels.to(__lowercase ) ).loss
__lowerCAmelCase = -(labels.shape[-1] * loss.item())
__lowerCAmelCase = -8_4.9_1_2_7
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 174
| 0
|
from ...processing_utils import ProcessorMixin
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = '''SpeechT5FeatureExtractor'''
lowerCAmelCase = '''SpeechT5Tokenizer'''
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int:
super().__init__(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def __call__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ : List[str] = kwargs.pop('''audio''' ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = kwargs.pop('''text''' ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = kwargs.pop('''text_target''' ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = kwargs.pop('''audio_target''' ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = kwargs.pop('''sampling_rate''' ,_SCREAMING_SNAKE_CASE )
if audio is not None and text is not None:
raise ValueError(
'''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''' )
if audio_target is not None and text_target is not None:
raise ValueError(
'''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''' )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''' )
if audio is not None:
UpperCAmelCase_ : Optional[Any] = self.feature_extractor(_SCREAMING_SNAKE_CASE ,*_SCREAMING_SNAKE_CASE ,sampling_rate=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
elif text is not None:
UpperCAmelCase_ : Dict = self.tokenizer(_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : List[str] = None
if audio_target is not None:
UpperCAmelCase_ : List[Any] = self.feature_extractor(audio_target=_SCREAMING_SNAKE_CASE ,*_SCREAMING_SNAKE_CASE ,sampling_rate=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = targets['''input_values''']
elif text_target is not None:
UpperCAmelCase_ : Optional[int] = self.tokenizer(_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = targets['''input_ids''']
else:
UpperCAmelCase_ : Tuple = None
if inputs is None:
return targets
if targets is not None:
UpperCAmelCase_ : Dict = labels
UpperCAmelCase_ : Optional[int] = targets.get('''attention_mask''' )
if decoder_attention_mask is not None:
UpperCAmelCase_ : List[Any] = decoder_attention_mask
return inputs
def a__ ( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Tuple:
UpperCAmelCase_ : Dict = kwargs.pop('''input_values''' ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = kwargs.pop('''input_ids''' ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = kwargs.pop('''labels''' ,_SCREAMING_SNAKE_CASE )
if input_values is not None and input_ids is not None:
raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''' )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''' )
if input_values is not None:
UpperCAmelCase_ : Tuple = self.feature_extractor.pad(_SCREAMING_SNAKE_CASE ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
elif input_ids is not None:
UpperCAmelCase_ : Tuple = self.tokenizer.pad(_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : List[str] = None
if labels is not None:
if "input_ids" in labels or (isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) and "input_ids" in labels[0]):
UpperCAmelCase_ : Tuple = self.tokenizer.pad(_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = targets['''input_ids''']
else:
UpperCAmelCase_ : int = self.feature_extractor.feature_size
UpperCAmelCase_ : List[str] = self.feature_extractor.num_mel_bins
UpperCAmelCase_ : Optional[Any] = self.feature_extractor.pad(_SCREAMING_SNAKE_CASE ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = feature_size_hack
UpperCAmelCase_ : List[Any] = targets['''input_values''']
else:
UpperCAmelCase_ : Optional[int] = None
if inputs is None:
return targets
if targets is not None:
UpperCAmelCase_ : Optional[int] = labels
UpperCAmelCase_ : Union[str, Any] = targets.get('''attention_mask''' )
if decoder_attention_mask is not None:
UpperCAmelCase_ : List[Any] = decoder_attention_mask
return inputs
def a__ ( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
| 235
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__a = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
]
)
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : int = state_dict.pop(_lowercase )
UpperCAmelCase_ : Optional[int] = val
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : str = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
UpperCAmelCase_ : List[Any] = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
UpperCAmelCase_ : Optional[Any] = value
else:
UpperCAmelCase_ : Union[str, Any] = value
return new_state_dict
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : int = ''''''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCAmelCase_ : Dict = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCAmelCase_ : Any = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ : Union[str, Any] = in_proj_weight[:256, :]
UpperCAmelCase_ : Optional[int] = in_proj_bias[:256]
UpperCAmelCase_ : Tuple = in_proj_weight[256:512, :]
UpperCAmelCase_ : List[Any] = in_proj_bias[256:512]
UpperCAmelCase_ : Union[str, Any] = in_proj_weight[-256:, :]
UpperCAmelCase_ : str = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
UpperCAmelCase_ : Any = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCAmelCase_ : Optional[int] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ : Union[str, Any] = in_proj_weight[:256, :]
UpperCAmelCase_ : List[str] = in_proj_bias[:256]
UpperCAmelCase_ : Optional[int] = in_proj_weight[256:512, :]
UpperCAmelCase_ : str = in_proj_bias[256:512]
UpperCAmelCase_ : Optional[Any] = in_proj_weight[-256:, :]
UpperCAmelCase_ : Union[str, Any] = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
UpperCAmelCase_ : List[str] = state_dict.pop(
f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
UpperCAmelCase_ : List[Any] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
UpperCAmelCase_ : List[str] = in_proj_weight_cross_attn[:256, :]
UpperCAmelCase_ : str = in_proj_bias_cross_attn[:256]
UpperCAmelCase_ : int = in_proj_weight_cross_attn[256:512, :]
UpperCAmelCase_ : Tuple = in_proj_bias_cross_attn[256:512]
UpperCAmelCase_ : List[str] = in_proj_weight_cross_attn[-256:, :]
UpperCAmelCase_ : Dict = in_proj_bias_cross_attn[-256:]
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_, UpperCAmelCase_ : List[Any] = image.size
UpperCAmelCase_ : List[Any] = max(_lowercase , _lowercase )
UpperCAmelCase_ : Dict = 800 if '''detection''' in checkpoint_url else 1000
UpperCAmelCase_ : Any = target_max_size / current_max_size
UpperCAmelCase_ : Any = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = F.to_tensor(_lowercase )
UpperCAmelCase_ : Optional[Any] = F.normalize(_lowercase , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    '''Copy/paste/tweak the original checkpoint's weights into our Table Transformer structure.'''
    logger.info('Converting model...')

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = 'model.'
    for key in state_dict.copy().keys():
        if not key.startswith('class_labels_classifier') and not key.startswith('bbox_predictor'):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone='resnet18',
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if 'detection' in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: 'table', 1: 'table rotated'}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: 'table',
            1: 'table column',
            2: 'table row',
            3: 'table column header',
            4: 'table projected row header',
            5: 'table spanning cell',
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format='coco_detection', max_size=800 if 'detection' in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png'
    file_path = hf_hub_download(repo_id='nielsr/example-pdf', repo_type='dataset', filename=filename)
    image = Image.open(file_path).convert('RGB')
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)

    if 'detection' in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print('Looks ok!')

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info('Pushing model to the hub...')
        model_name = (
            'microsoft/table-transformer-detection'
            if 'detection' in checkpoint_url
            else 'microsoft/table-transformer-structure-recognition'
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
type=str,
choices=[
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
],
help='URL of the Table Transformer checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
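# A minimal invocation sketch for the script above (the script and output folder names
# are illustrative, not from the source):
#
#   python convert_table_transformer_checkpoint.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection
#
# The dumped folder can then be reloaded with
# TableTransformerForObjectDetection.from_pretrained('./table-transformer-detection').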
| 235
| 1
|
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse('0.11.0').release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type='dataset', revision=revision)
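# A short usage sketch (repo id and file name are illustrative): for recent
# huggingface_hub versions this resolves straight to the dataset 'resolve' URL, e.g.
#
#   hf_hub_url('squad', 'README.md', revision='main')
#   # -> 'https://huggingface.co/datasets/squad/resolve/main/README.md'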
| 78
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'apply_ocr'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors='pt')
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
def snake_case__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Any = LayoutLMvaImageProcessor()
from datasets import load_dataset
__UpperCAmelCase : Any = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
__UpperCAmelCase : Optional[int] = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
__UpperCAmelCase : Any = image_processing(a_ , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__UpperCAmelCase : Any = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
__UpperCAmelCase : Tuple = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], 
[6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , a_ )
self.assertListEqual(encoding.boxes , a_ )
# with apply_OCR = False
__UpperCAmelCase : List[str] = LayoutLMvaImageProcessor(apply_ocr=a_ )
__UpperCAmelCase : List[Any] = image_processing(a_ , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
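# A minimal usage sketch mirroring the integration test above (the image path is
# illustrative):
#
#   image_processing = LayoutLMvaImageProcessor(apply_ocr=False)
#   encoding = image_processing(Image.open('document.png').convert('RGB'), return_tensors='pt')
#   encoding.pixel_values.shape  # -> torch.Size([1, 3, 224, 224])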
| 226
| 0
|
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str):
    """
    Log commit info.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        'repo_id': str(repo),
        'repo_sha': str(repo.head.object.hexsha),
        'repo_branch': str(repo.active_branch),
    }

    with open(os.path.join(folder_path, 'git_log.json'), 'w') as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """
    Handle single and multi-GPU / multi-node setups.
    """
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info('Initializing GPUs')
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ['WORLD_SIZE'])
        params.n_gpu_per_node = int(os.environ['N_GPU_NODE'])
        params.global_rank = int(os.environ['RANK'])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ['N_NODES'])
        assert params.node_id == int(os.environ['NODE_RANK'])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f'--- Global rank: {params.global_rank} - '
logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes )
logger.info(PREFIX + '''Node ID : %i''' % params.node_id )
logger.info(PREFIX + '''Local rank : %i''' % params.local_rank )
logger.info(PREFIX + '''World size : %i''' % params.world_size )
logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node )
logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) )
logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) )
logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) )
logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('''Initializing PyTorch distributed''' )
torch.distributed.init_process_group(
init_method='''env://''' , backend='''nccl''' , )
def set_seed(args):
    """
    Set the random seed for numpy and torch.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
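# Usage note, derived from the environment lookups in init_gpu_params above: a multi-GPU
# launch is expected to provide WORLD_SIZE, N_GPU_NODE, RANK, N_NODES and NODE_RANK
# (typically set by the distributed launcher), while a single-GPU job should pass
# local_rank == -1 and n_gpu == 1.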
| 350
|
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r'@@$', '', k), v) if k.endswith('@@') else (re.sub(r'$', '</w>', k), v) for k, v in d.items())
    keep_keys = '<s> <pad> </s> <unk>'.split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f'{k}</w>']
        d2[k] = d[k]  # restore
    return d2
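# Worked example of the key-rewriting rule above: 'le@@' -> 'le', 'tt@@' -> 'tt',
# 'er' -> 'er</w>'. A real fairseq dict also contains '<s> <pad> </s> <unk>', which the
# final loop restores to their bare forms (without the added '</w>').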
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f'Writing results to {pytorch_dump_folder_path}')

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
    data_name_or_path = '.'
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f'using checkpoint {checkpoint_file}')
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt['args']['model'])

    src_lang = args['source_lang']
    tgt_lang = args['target_lang']

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f'dict.{src_lang}.txt')
    tgt_dict_file = os.path.join(fsmt_folder_path, f'dict.{tgt_lang}.txt')

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, 'vocab-src.json')
    print(f'Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records')
    with open(src_vocab_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, 'vocab-tgt.json')
    print(f'Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records')
    with open(tgt_vocab_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['merges_file'])
    for fn in ['bpecodes', 'code']:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding='utf-8') as fin:
        merges = fin.read()
    merges = re.sub(r' \d+$', '', merges, 0, re.M)  # remove frequency number
    print(f'Generating {merges_file}')
    with open(merges_file, 'w', encoding='utf-8') as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, 'config.json')

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args['bpe'] == 'fastbpe', f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args['tokenizer'] == 'moses', f"need to extend tokenizer to support bpe={args['tokenizer']}"

    model_conf = {
        'architectures': ['FSMTForConditionalGeneration'],
        'model_type': 'fsmt',
        'activation_dropout': args['activation_dropout'],
        'activation_function': 'relu',
        'attention_dropout': args['attention_dropout'],
        'd_model': args['decoder_embed_dim'],
        'dropout': args['dropout'],
        'init_std': 0.02,
        'max_position_embeddings': args['max_source_positions'],
        'num_hidden_layers': args['encoder_layers'],
        'src_vocab_size': src_vocab_size,
        'tgt_vocab_size': tgt_vocab_size,
        'langs': [src_lang, tgt_lang],
        'encoder_attention_heads': args['encoder_attention_heads'],
        'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
        'encoder_layerdrop': args['encoder_layerdrop'],
        'encoder_layers': args['encoder_layers'],
        'decoder_attention_heads': args['decoder_attention_heads'],
        'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
        'decoder_layerdrop': args['decoder_layerdrop'],
        'decoder_layers': args['decoder_layers'],
        'bos_token_id': 0,
        'pad_token_id': 1,
        'eos_token_id': 2,
        'is_encoder_decoder': True,
        'scale_embedding': not args['no_scale_embedding'],
        'tie_word_embeddings': args['share_all_embeddings'],
    }

    # good hparam defaults to start with
    model_conf['num_beams'] = 5
    model_conf['early_stopping'] = False
    if model_dir in best_score_hparams and 'length_penalty' in best_score_hparams[model_dir]:
        model_conf['length_penalty'] = best_score_hparams[model_dir]['length_penalty']
    else:
        model_conf['length_penalty'] = 1.0

    print(f'Generating {fsmt_model_config_file}')
    with open(fsmt_model_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        'langs': [src_lang, tgt_lang],
        'model_max_length': 1024,
        'do_lower_case': do_lower_case,
    }

    print(f'Generating {fsmt_tokenizer_config_file}')
    with open(fsmt_tokenizer_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt['models'][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        'model.model',
        'model.encoder.version',
        'model.decoder.version',
        'model.encoder_embed_tokens.weight',
        'model.decoder_embed_tokens.weight',
        'model.encoder.embed_positions._float_tensor',
        'model.decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f'Generating {pytorch_weights_dump_path}')
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print('Conversion is done!')
    print('\nLast step is to upload the files to s3')
    print(f'cd {data_root}')
    print(f'transformers-cli upload {model_dir}')
if __name__ == "__main__":
_lowercase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_lowercase : str = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
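# A minimal invocation sketch (the checkpoint path is illustrative; per the help text it
# must sit in the original dump dir next to the dicts and bpecodes):
#
#   python convert_fsmt_checkpoint.py \
#       --fsmt_checkpoint_path ./wmt19.ru-en.ensemble/model4.pt \
#       --pytorch_dump_folder_path ./wmt19-ru-en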
| 272
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mega"""] = [
"""MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegaForCausalLM""",
"""MegaForMaskedLM""",
"""MegaForMultipleChoice""",
"""MegaForQuestionAnswering""",
"""MegaForSequenceClassification""",
"""MegaForTokenClassification""",
"""MegaModel""",
"""MegaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
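# Note: because sys.modules[__name__] is replaced by a _LazyModule above, importing this
# package stays cheap; the torch-backed classes (e.g. MegaModel) are only actually
# imported on first attribute access.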
| 4
|
'''simple docstring'''
def bin_to_octal(bin_string: str) -> str:
    if not all(char in '01' for char in bin_string):
        raise ValueError('Non-binary value was passed to the function')
    if not bin_string:
        raise ValueError('Empty string was passed to the function')
    oct_string = ''
    # left-pad so the length is a multiple of 3
    while len(bin_string) % 3 != 0:
        bin_string = '0' + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
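# Worked examples (hand-checked): '111' -> '7'; '100110' groups as 100|110 -> '46';
# '11' is left-padded to '011' -> '3'.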
if __name__ == "__main__":
from doctest import testmod
testmod()
| 208
| 0
|
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    '''Doolittle LU decomposition of a square matrix.'''
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f'{rows}x{columns} array:\n{table}'
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError('No LU decomposition exists')
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
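# A small worked example (hand-checked Doolittle factorization): for
# table = [[2, 1], [4, 3]] the function returns lower = [[1, 0], [2, 1]] and
# upper = [[2, 1], [0, 1]], and indeed lower @ upper reproduces the input.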
if __name__ == "__main__":
import doctest
doctest.testmod()
| 304
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file,
    eval_file,
    test_file,
    tokenizer,
    label_column_id,
    max_seq_length=None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset('csv', data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding='max_length'
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding='max_length',
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={'help': 'Which column contains the label'})
    train_file: str = field(default=None, metadata={'help': 'The path of the training file'})
    dev_file: Optional[str] = field(default=None, metadata={'help': 'The path of the development file'})
    test_file: Optional[str] = field(default=None, metadata={'help': 'The path of the test file'})
    max_seq_length: int = field(
        default=128,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}
    )
    use_fast: bool = field(default=False, metadata={'help': 'Set this flag to use fast tokenization.'})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
            ' --overwrite_output_dir to overcome.'
        )

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO,
    )
    logger.info(
        f'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, '
        f'16-bits training: {training_args.fp16}'
    )
    logger.info(f'Training/evaluation parameters {training_args}')

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task='text-classification',
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool('.bin' in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {'acc': (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results.txt')

        with open(output_eval_file, 'w') as writer:
            logger.info('***** Eval results *****')
            for key, value in result.items():
                logger.info(f' {key} = {value}')
                writer.write(f'{key} = {value}\n')

            results.update(result)

    return results
if __name__ == "__main__":
main()
| 304
| 1
|
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/data2vec-base-960h': 'https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    model_type = 'data2vec-audio'
def __init__( self , _UpperCamelCase=3_2 , _UpperCamelCase=7_6_8 , _UpperCamelCase=1_2 , _UpperCamelCase=1_2 , _UpperCamelCase=3_0_7_2 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=0.0 , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=0.02 , _UpperCamelCase=1E-5 , _UpperCamelCase="gelu" , _UpperCamelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _UpperCamelCase=(5, 2, 2, 2, 2, 2, 2) , _UpperCamelCase=(1_0, 3, 3, 3, 3, 2, 2) , _UpperCamelCase=False , _UpperCamelCase=1_6 , _UpperCamelCase=1_9 , _UpperCamelCase=5 , _UpperCamelCase=0.05 , _UpperCamelCase=1_0 , _UpperCamelCase=2 , _UpperCamelCase=0.0 , _UpperCamelCase=1_0 , _UpperCamelCase=0 , _UpperCamelCase="sum" , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=2_5_6 , _UpperCamelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , _UpperCamelCase=(5, 3, 3, 1, 1) , _UpperCamelCase=(1, 2, 3, 1, 1) , _UpperCamelCase=5_1_2 , _UpperCamelCase=0 , _UpperCamelCase=1 , _UpperCamelCase=2 , _UpperCamelCase=False , _UpperCamelCase=3 , _UpperCamelCase=2 , _UpperCamelCase=3 , _UpperCamelCase=None , **_UpperCamelCase , ) -> Any:
super().__init__(**_UpperCamelCase , pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = hidden_size
UpperCAmelCase_ : Optional[int] = feat_extract_activation
UpperCAmelCase_ : Optional[Any] = list(_UpperCamelCase )
UpperCAmelCase_ : int = list(_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = list(_UpperCamelCase )
UpperCAmelCase_ : List[Any] = conv_bias
UpperCAmelCase_ : int = num_conv_pos_embeddings
UpperCAmelCase_ : int = num_conv_pos_embedding_groups
UpperCAmelCase_ : List[str] = conv_pos_kernel_size
UpperCAmelCase_ : List[Any] = len(self.conv_dim )
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : Any = intermediate_size
UpperCAmelCase_ : List[Any] = hidden_act
UpperCAmelCase_ : int = num_attention_heads
UpperCAmelCase_ : Optional[Any] = hidden_dropout
UpperCAmelCase_ : Any = attention_dropout
UpperCAmelCase_ : Dict = activation_dropout
UpperCAmelCase_ : Optional[Any] = feat_proj_dropout
UpperCAmelCase_ : Any = final_dropout
UpperCAmelCase_ : Optional[Any] = layerdrop
UpperCAmelCase_ : Dict = layer_norm_eps
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : List[Any] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ : Tuple = mask_time_prob
UpperCAmelCase_ : int = mask_time_length
UpperCAmelCase_ : Tuple = mask_time_min_masks
UpperCAmelCase_ : Dict = mask_feature_prob
UpperCAmelCase_ : List[str] = mask_feature_length
UpperCAmelCase_ : Optional[Any] = mask_feature_min_masks
# ctc loss
UpperCAmelCase_ : Dict = ctc_loss_reduction
UpperCAmelCase_ : Any = ctc_zero_infinity
# adapter
UpperCAmelCase_ : List[Any] = add_adapter
UpperCAmelCase_ : Tuple = adapter_kernel_size
UpperCAmelCase_ : Tuple = adapter_stride
UpperCAmelCase_ : Any = num_adapter_layers
UpperCAmelCase_ : List[str] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase_ : int = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase_ : Dict = list(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = list(_UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = list(_UpperCamelCase )
UpperCAmelCase_ : int = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
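# For the default conv_stride of (5, 2, 2, 2, 2, 2, 2) the property above evaluates to
# 5 * 2**6 == 320, i.e. one output frame per 320 input samples.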
| 29
|
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    'tensor(bool)': np.bool_,
    'tensor(int8)': np.int8,
    'tensor(uint8)': np.uint8,
    'tensor(int16)': np.int16,
    'tensor(uint16)': np.uint16,
    'tensor(int32)': np.int32,
    'tensor(uint32)': np.uint32,
    'tensor(int64)': np.int64,
    'tensor(uint64)': np.uint64,
    'tensor(float16)': np.float16,
    'tensor(float)': np.float32,
    'tensor(double)': np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.')
        self.model = model
        self.model_save_dir = kwargs.get('model_save_dir', None)
        self.latest_model_name = kwargs.get('latest_model_name', ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        if provider is None:
            logger.info('No onnxruntime provider specified, using CPUExecutionProvider')
            provider = 'CPUExecutionProvider'
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory, file_name=None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f'Provided path ({save_directory}) should be a directory, not a file')
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id,
        use_auth_token=None,
        revision=None,
        force_download=False,
        cache_dir=None,
        file_name=None,
        provider=None,
        sess_options=None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs['model_save_dir'] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs['model_save_dir'] = Path(model_cache_path).parent
            kwargs['latest_model_name'] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(cls, model_id, force_download=True, use_auth_token=None, cache_dir=None, **model_kwargs):
        revision = None
        if len(str(model_id).split('@')) == 2:
            model_id, revision = model_id.split('@')
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
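# A minimal usage sketch (the model path is illustrative): load an exported ONNX model
# and call it with keyword numpy inputs, mirroring __call__ above.
#
#   model = OnnxRuntimeModel.from_pretrained('./my-onnx-model', provider='CPUExecutionProvider')
#   outputs = model(input_ids=np.ones((1, 8), dtype=np.int64))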
| 29
| 1
|
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N) -> int:
    '''Find the greatest product of thirteen adjacent digits in n.'''
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
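# Sanity check on a toy input (hand-computed): for a 13-digit string containing a zero,
# e.g. solution('1234567890123'), the single 13-digit window's product is 0, so 0 is
# returned.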
if __name__ == "__main__":
print(F'{solution() = }')
| 358
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), 'Tatoeba directory does not exist.')
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(['heb-eng'])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card('opus-mt-he-en', dry_run=True)
        assert mmeta['long_pair'] == 'heb-eng'
| 180
| 0
|
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'features' , [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
] , )
def SCREAMING_SNAKE_CASE_ ( __A : Any , __A : List[Any] , __A : str ) -> Any:
"""simple docstring"""
a_ : Tuple = tmp_path / 'cache'
a_ : str = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
a_ : Tuple = features.copy() if features else default_expected_features
a_ : str = (
Features({feature: Value(__A ) for feature, dtype in features.items()} ) if features is not None else None
)
a_ : List[str] = JsonDatasetReader(__A , features=__A , cache_dir=__A ).read()
assert isinstance(__A , __A )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def SCREAMING_SNAKE_CASE_ ( __A : str , __A : List[str] ) -> Any:
"""simple docstring"""
a_ : Dict = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
a_ : Optional[Any] = features.copy()
a_ : List[str] = (
Features({feature: Value(__A ) for feature, dtype in features.items()} ) if features is not None else None
)
a_ : List[Any] = tmp_path / 'cache'
a_ : Any = JsonDatasetReader(__A , features=__A , cache_dir=__A ).read()
assert isinstance(__A , __A )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
assert dataset.split == split if split else "train"
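    # NOTE: operator precedence makes the line above parse as
    # (dataset.split == split) if split else "train", so when split is None the
    # assert merely evaluates the truthy string "train" instead of comparing to it.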
@pytest.mark.parametrize('path_type' , [str, list] )
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared shape/schema assertions for a DatasetDict read from JSON."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({'train': jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({'train': jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = 'train'
        path = {'train': jsonl_path, 'test': jsonl_path}
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys() )
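    # Passing a {split_name: path} mapping yields a DatasetDict keyed by those
    # split names, while the bare-path readers above default everything to "train".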
def load_json(buffer):
    """Load a whole-file JSON payload from a binary buffer."""
    return json.load(buffer)
def load_json_lines(buffer):
    """Load a JSON Lines payload: one JSON object per line."""
    return [json.loads(line) for line in buffer]
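# Quick sanity check for the two loaders (a minimal sketch; the payload below is
# illustrative, not one of the test fixtures):
#   buf = io.BytesIO(b'{"col_1": "a"}\n{"col_1": "b"}\n')
#   load_json_lines(buf)  # -> [{"col_1": "a"}, {"col_1": "b"}]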
class TestJsonDatasetWriter:
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, 'keys') and not hasattr(exported_content[0], 'keys')
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, 'keys') and not hasattr(exported_content[0], 'keys')
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = str(tmp_path_factory.mktemp('data') / f"test.json.{extension}")
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, 'rb', compression='infer') as f:
            exported_content = f.read()
        with fsspec.open(original_path, 'rb', compression='infer') as f:
            original_content = f.read()
assert exported_content == original_content
| 32
|
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(n) for n in a))
if __name__ == "__main__":
main()
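# Pigeonhole sort runs in O(n + range) time and O(range) extra space, where
# range = max(a) - min(a) + 1, so it only pays off when the value range is small
# relative to the list length. For example, pigeonhole_sort on
# [8, 3, 2, 7, 4, 6, 8] needs just 7 holes (values 2..8) and yields
# [2, 3, 4, 6, 7, 8, 8] in place.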
| 321
| 0
|
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])
    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])
    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)
    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)
    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]
    set_model_weights_in_torch(model_weights, model, config.hidden_size)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
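# Hypothetical invocation (the script and file names below are illustrative):
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path ./model.pkl \
#       --config_file ./config.json \
#       --pytorch_dump_path ./reformer_pytorch.bin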
| 367
|
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]
OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"
def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
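# The conversion is a single key rename: DialoGPT checkpoints store the output
# projection under "lm_head.decoder.weight", while transformers' GPT-2 head
# expects "lm_head.weight". A hedged one-off example (paths are illustrative):
#   convert_dialogpt_checkpoint("small_ft.pkl", "./DialoGPT-small")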
| 134
| 0
|
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( _a , _a , _a , unittest.TestCase ):
_a = StableUnCLIPPipeline
_a = TEXT_TO_IMAGE_PARAMS
_a = TEXT_TO_IMAGE_BATCH_PARAMS
_a = TEXT_TO_IMAGE_IMAGE_PARAMS
_a = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_a = False
def __lowercase ( self : Any ):
lowerCAmelCase = 32
lowerCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
lowerCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCAmelCase , projection_dim=lowerCAmelCase , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowerCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=lowerCAmelCase , num_layers=1 , )
torch.manual_seed(0 )
lowerCAmelCase = DDPMScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=lowerCAmelCase , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , )
# regular denoising components
torch.manual_seed(0 )
lowerCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=lowerCAmelCase )
lowerCAmelCase = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
lowerCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowerCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCAmelCase , layers_per_block=1 , upcast_attention=lowerCAmelCase , use_linear_projection=lowerCAmelCase , )
torch.manual_seed(0 )
lowerCAmelCase = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=lowerCAmelCase , steps_offset=1 , )
torch.manual_seed(0 )
lowerCAmelCase = AutoencoderKL()
lowerCAmelCase = {
# prior components
"""prior_tokenizer""": prior_tokenizer,
"""prior_text_encoder""": prior_text_encoder,
"""prior""": prior,
"""prior_scheduler""": prior_scheduler,
# image noising components
"""image_normalizer""": image_normalizer,
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder,
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
}
return components
def __lowercase ( self : Optional[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int]=0 ):
if str(lowerCAmelCase ).startswith("""mps""" ):
lowerCAmelCase = torch.manual_seed(lowerCAmelCase )
else:
lowerCAmelCase = torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
lowerCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""prior_num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def __lowercase ( self : Union[str, Any] ):
lowerCAmelCase = torch_device == """cpu"""
self._test_attention_slicing_forward_pass(test_max_difference=lowerCAmelCase )
def __lowercase ( self : Tuple ):
lowerCAmelCase = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=lowerCAmelCase )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowercase ( self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : Any ):
lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )
lowerCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCAmelCase = pipe("""anime turle""" , generator=lowerCAmelCase , output_type="""np""" )
lowerCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCAmelCase , lowerCAmelCase )
def __lowercase ( self : List[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
lowerCAmelCase = pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCAmelCase = pipe(
"""anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , )
lowerCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 155
|
"""simple docstring"""
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    '''simple docstring'''
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")
    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (the system is consistent, with x = y = 0)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
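# Worked example: x + 2y = 3 and 2x + y = 3 give determinant = 1*1 - 2*2 = -3,
# determinant_x = 3*1 - 3*2 = -3 and determinant_y = 1*3 - 2*3 = -3, hence
#   cramers_rule_2x2([1, 2, 3], [2, 1, 3]) == (1.0, 1.0)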
| 155
| 1
|
'''simple docstring'''
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar('T')
class DisjointSetTreeNode(Generic[T]):
    # Disjoint Set Node to store the data and rank, with the parent pointer
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0
class DisjointSetTree(Generic[T]):
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}
    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)
    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent
    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation (union by rank)
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1
    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))
class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}
    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}
    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # collect each undirected edge once, in ascending order of weight
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])
        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)
        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
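# Minimal usage sketch (the tiny triangle graph below is illustrative only):
#   g = GraphUndirectedWeighted[int]()
#   g.add_edge(1, 2, 1)
#   g.add_edge(2, 3, 2)
#   g.add_edge(1, 3, 3)
#   mst = g.kruskal()
#   # the MST keeps the two cheapest edges, (1, 2) and (2, 3), total weight 3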
| 242
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
lowercase =logging.get_logger(__name__)
class __magic_name__ ( lowerCAmelCase ):
def __init__( self , *snake_case , **snake_case) -> None:
'''simple docstring'''
warnings.warn(
'The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use DeformableDetrImageProcessor instead.' , snake_case , )
super().__init__(*snake_case , **snake_case)
| 242
| 1
|
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """Check primality with trial division over 6k +/- 1 candidates."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10_001) -> int:
    """Return the nth prime number."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
print(F"""{solution() = }""")
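# is_prime relies on the 6k +/- 1 optimisation: every prime above 3 has that
# form, so trial division only tests 5, 7, 11, 13, ... up to sqrt(n).
# For instance, is_prime(91) correctly returns False (91 = 7 * 13), and
# solution(6) returns 13, the sixth prime.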
| 134
|
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
__snake_case : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(lowercase_ )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def __init__( self : List[str] , **lowerCAmelCase_ : Any ) -> List[Any]:
'''simple docstring'''
super().__init__(**lowerCAmelCase_ )
if self.framework != "pt":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
# No specific FOR_XXX available yet
def __call__( self : int , lowerCAmelCase_ : Union[np.ndarray, bytes, str] , **lowerCAmelCase_ : Tuple ) -> Dict:
'''simple docstring'''
return super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ )
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()
        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)
        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")
        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt")
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
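# Hedged usage sketch (the checkpoint name and file are assumptions, not pinned
# by this module):
#   from transformers import pipeline
#   classifier = pipeline(task="zero-shot-audio-classification",
#                         model="laion/clap-htsat-unfused")
#   classifier("dog_bark.wav", candidate_labels=["dog barking", "vacuum cleaner"])
#   # -> [{"score": ..., "label": "dog barking"}, {"score": ..., "label": "vacuum cleaner"}]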
| 134
| 1
|
def is_palindrome(head):
    '''simple docstring'''
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head):
    '''simple docstring'''
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    fast = slow = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    cur = head
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict(head):
    '''simple docstring'''
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
    if middle > 1:
        return False
    return True
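# The three variants trade space for invasiveness: the reversal version uses
# O(1) extra space but mutates the list, the stack version keeps the list
# intact at O(n) extra space, and the dict version checks symmetry through
# value positions without touching any links.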
| 366
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
"xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
"xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
"xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
"xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
"xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
"xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
"xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
"xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
"xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : List[str] = "xlm"
A : List[str] = {
"hidden_size": "emb_dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
"n_words": "vocab_size", # For backward compatibility
}
    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id
        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
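# Marking axis 0 (batch) and the trailing axis (sequence or choice) as dynamic
# lets a single exported ONNX graph serve arbitrary batch sizes and sequence
# lengths; note this config also exports token_type_ids alongside input_ids and
# attention_mask.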
| 20
| 0
|
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
a__ : Any ='''pt'''
elif is_tf_available():
a__ : Tuple ='''tf'''
else:
a__ : List[str] ='''jax'''
class snake_case ( __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any =PerceiverTokenizer
SCREAMING_SNAKE_CASE_ : Optional[int] =False
    def setUp( self ):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def perceiver_tokenizer( self ):
        return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
    def get_tokenizer( self , **kwargs ):
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_clean_sequence( self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ):
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer ) ):
            try:
                tok = tokenizer.decode([i] , clean_up_tokenization_spaces=False )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        toks = list(filter(lambda t: re.match(R'^[ a-zA-Z]+$' , t[1] ) , toks ) )
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1] , add_special_tokens=False ) , toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(toks_ids ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False )
                + ' '
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt , add_special_tokens=False )
        return output_txt, output_ids
def _lowerCamelCase ( self : Union[str, Any] ):
__UpperCamelCase = self.perceiver_tokenizer
__UpperCamelCase = 'Unicode €.'
__UpperCamelCase = tokenizer(__A )
__UpperCamelCase = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded['input_ids'] , __A )
# decoding
__UpperCamelCase = tokenizer.decode(__A )
self.assertEqual(__A , '[CLS]Unicode €.[SEP]' )
__UpperCamelCase = tokenizer('e è é ê ë' )
__UpperCamelCase = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['input_ids'] , __A )
# decoding
__UpperCamelCase = tokenizer.decode(__A )
self.assertEqual(__A , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def _lowerCamelCase ( self : List[Any] ):
__UpperCamelCase = self.perceiver_tokenizer
__UpperCamelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
__UpperCamelCase = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
__UpperCamelCase = tokenizer(__A , padding=__A , return_tensors=__A )
self.assertIsInstance(__A , __A )
if FRAMEWORK != "jax":
__UpperCamelCase = list(batch.input_ids.numpy()[0] )
else:
__UpperCamelCase = list(batch.input_ids.tolist()[0] )
self.assertListEqual(__A , __A )
self.assertEqual((2, 3_8) , batch.input_ids.shape )
self.assertEqual((2, 3_8) , batch.attention_mask.shape )
def _lowerCamelCase ( self : Dict ):
__UpperCamelCase = self.perceiver_tokenizer
__UpperCamelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__UpperCamelCase = tokenizer(__A , padding=__A , return_tensors=__A )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , __A )
self.assertIn('attention_mask' , __A )
self.assertNotIn('decoder_input_ids' , __A )
self.assertNotIn('decoder_attention_mask' , __A )
def _lowerCamelCase ( self : List[Any] ):
__UpperCamelCase = self.perceiver_tokenizer
__UpperCamelCase = [
'Summary of the text.',
'Another summary.',
]
__UpperCamelCase = tokenizer(
text_target=__A , max_length=3_2 , padding='max_length' , truncation=__A , return_tensors=__A )
self.assertEqual(3_2 , targets['input_ids'].shape[1] )
def _lowerCamelCase ( self : Dict ):
# safety check on max_len default value so we are sure the test works
__UpperCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
__UpperCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCamelCase = tempfile.mkdtemp()
__UpperCamelCase = ' He is very happy, UNwant\u00E9d,running'
__UpperCamelCase = tokenizer.encode(__A , add_special_tokens=__A )
tokenizer.save_pretrained(__A )
__UpperCamelCase = tokenizer.__class__.from_pretrained(__A )
__UpperCamelCase = after_tokenizer.encode(__A , add_special_tokens=__A )
self.assertListEqual(__A , __A )
shutil.rmtree(__A )
__UpperCamelCase = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCamelCase = tempfile.mkdtemp()
__UpperCamelCase = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
__UpperCamelCase = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
__UpperCamelCase = tokenizer.encode(__A , add_special_tokens=__A )
tokenizer.save_pretrained(__A )
__UpperCamelCase = tokenizer.__class__.from_pretrained(__A )
__UpperCamelCase = after_tokenizer.encode(__A , add_special_tokens=__A )
self.assertListEqual(__A , __A )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
__UpperCamelCase = tokenizer.__class__.from_pretrained(__A , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(__A )
def _lowerCamelCase ( self : Any ):
__UpperCamelCase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__A )
with open(os.path.join(__A , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
__UpperCamelCase = json.load(__A )
with open(os.path.join(__A , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
__UpperCamelCase = json.load(__A )
__UpperCamelCase = [f'''<extra_id_{i}>''' for i in range(1_2_5 )]
__UpperCamelCase = added_tokens_extra_ids + [
'an_additional_special_token'
]
__UpperCamelCase = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(__A , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(__A , __A )
with open(os.path.join(__A , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(__A , __A )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__UpperCamelCase = tokenizer_class.from_pretrained(
__A , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__UpperCamelCase = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=__A )]
__UpperCamelCase = tokenizer_class.from_pretrained(
__A , additional_special_tokens=__A , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def _lowerCamelCase ( self : Dict ):
__UpperCamelCase = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ) , '�' )
def _lowerCamelCase ( self : Dict ):
pass
def _lowerCamelCase ( self : List[str] ):
pass
def _lowerCamelCase ( self : Tuple ):
pass
def _lowerCamelCase ( self : Optional[Any] ):
pass
def _lowerCamelCase ( self : str ):
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
__UpperCamelCase = self.get_tokenizers(fast=__A , do_lower_case=__A )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCamelCase = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
__UpperCamelCase = tokenizer.convert_tokens_to_string(__A )
self.assertIsInstance(__A , __A )
| 53
|
def cocktail_shaker_sort(unsorted: list) -> list:
    '''simple docstring'''
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
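# Cocktail shaker sort is a bidirectional bubble sort: each outer iteration
# bubbles the largest remaining element rightwards, then the smallest leftwards,
# and the `swapped` flag lets an already-sorted list exit after one pass.
#   e.g. cocktail_shaker_sort([4, 5, 2, 1, 2]) -> [1, 2, 2, 4, 5]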
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Any = input("Enter numbers separated by a comma:\n").strip()
lowerCamelCase : Dict = [int(item) for item in user_input.split(",")]
print(F"""{cocktail_shaker_sort(unsorted) = }""")
| 204
| 0
|
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
UpperCamelCase : str = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
UpperCamelCase : Union[str, Any] = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
UpperCamelCase : Any = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
    def _info( self ):
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/mjpost/sacreBLEU#chrf--chrf' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#chrf--chrf'] , reference_urls=[
'https://github.com/m-popovic/chrF',
] , )
    def _compute( self , predictions , references , char_order = CHRF.CHAR_ORDER , word_order = CHRF.WORD_ORDER , beta = CHRF.BETA , lowercase = False , whitespace = False , eps_smoothing = False , ):
        '''simple docstring'''
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError('Sacrebleu requires the same number of references for each prediction' )
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_chrf = CHRF(char_order , word_order , beta , lowercase , whitespace , eps_smoothing )
        output = sb_chrf.corpus_score(predictions , transformed_references )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
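# A minimal usage sketch for the metric above (illustrative sentences; assumes `datasets`
# and `sacrebleu>=1.4.12` are installed). Note how `_compute` transposes `references`
# from one-list-per-prediction into one-list-per-reference-position, the layout that
# `CHRF.corpus_score` expects.
if __name__ == "__main__":
    _demo_metric = datasets.load_metric('chrf' )
    _demo_results = _demo_metric.compute(
        predictions=['the cat sat on the mat'] ,
        references=[['the cat sat on the mat', 'a cat sat on a mat']] ,
        word_order=2 , # word_order=2 switches from chrF to chrF++
    )
    print(round(_demo_results['score'] , 2 ) )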
| 358
|
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester ( ConfigTester ):
    def create_and_test_config_common_properties( self ):
        '''simple docstring'''
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , 'hidden_sizes' ) )
        self.parent.assertTrue(hasattr(config , 'num_attention_heads' ) )
class LevitModelTester :
    def __init__( self , parent , batch_size=13 , image_size=64 , num_channels=3 , kernel_size=3 , stride=2 , padding=1 , patch_size=16 , hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , mlp_ratio=[2, 2, 2] , attention_ratio=[2, 2, 2] , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=2 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.initializer_range = initializer_range
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        '''simple docstring'''
        return LevitConfig(
            image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
    def create_and_check_model( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = LevitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        image_size = (self.image_size, self.image_size)
        height , width = image_size[0], image_size[1]
        for _ in range(4 ):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = LevitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LevitConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        '''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        '''simple docstring'''
        return
@unittest.skip(reason='Levit does not use inputs_embeds' )
    def test_inputs_embeds( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Levit does not support input and output embeddings' )
    def test_model_common_attributes( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Levit does not output attentions' )
    def test_attention_outputs( self ):
'''simple docstring'''
pass
    def test_forward_signature( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_hidden_states_output( self ):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depths ) + 1
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height , width = image_size[0], image_size[1]
            for _ in range(4 ):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_training( self ):
        '''simple docstring'''
        if not self.model_tester.is_training:
            return
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING )
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config )
            model.gradient_checkpointing_enable()
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_problem_types( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
            {'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
            {'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
            {'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}' ):
                    config.problem_type = problem_type['title']
                    config.num_labels = problem_type['num_labels']
                    model = model_class(config )
                    model.to(torch_device )
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
                    if problem_type["num_labels"] > 1:
                        inputs['labels'] = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
                    inputs['labels'] = inputs['labels'].to(problem_type['dtype'] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True ) as warning_list:
                        loss = model(**inputs ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                F'Something is going wrong in the regression problem: intercepted {w.message}' )
                    loss.backward()
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class LevitModelIntegrationTest ( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        '''simple docstring'''
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
    @slow
    def test_inference_image_classification_head( self ):
        '''simple docstring'''
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
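# The checks in the test suite above repeatedly apply the standard convolution output-size
# formula, floor((size + 2 * padding - kernel_size) / stride) + 1, four times, because
# LeViT's patch embedding stacks four stride-2 convolutions. A small worked sketch (the
# defaults below mirror the tester's made-up config: image_size=64, kernel_size=3,
# stride=2, padding=1):
def _demo_levit_feature_resolution(size=64 , kernel_size=3 , stride=2 , padding=1 , num_convs=4 ):
    for _ in range(num_convs ):
        size = floor(((size + 2 * padding - kernel_size) / stride) + 1 )
    return size # 64 -> 32 -> 16 -> 8 -> 4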
| 263
| 0
|
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class EfficientNetConfig (PretrainedConfig ):
    """simple docstring"""
    model_type = """efficientnet"""
    def __init__( self , num_channels = 3 , image_size = 600 , width_coefficient = 2.0 , depth_coefficient = 3.1 , depth_divisor = 8 , kernel_sizes = [3, 3, 5, 3, 5, 5, 3] , in_channels = [32, 16, 24, 40, 80, 112, 192] , out_channels = [16, 24, 40, 80, 112, 192, 320] , depthwise_padding = [] , strides = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats = [1, 2, 2, 3, 3, 4, 1] , expand_ratios = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio = 0.25 , hidden_act = "swish" , hidden_dim = 2560 , pooling_type = "mean" , initializer_range = 0.02 , batch_norm_eps = 0.001 , batch_norm_momentum = 0.99 , dropout_rate = 0.5 , drop_connect_rate = 0.2 , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats ) * 4
class EfficientNetOnnxConfig (OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse("""1.11""" )
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
    def atol_for_validation( self ) -> float:
return 1e-5
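# A minimal instantiation sketch (an illustration; the defaults above correspond to
# EfficientNet-B7-scale hyperparameters):
if __name__ == "__main__":
    _demo_config = EfficientNetConfig()
    # num_hidden_layers is derived in __init__ as sum(num_block_repeats) * 4
    print(_demo_config.num_hidden_layers ) # sum([1, 2, 2, 3, 3, 4, 1]) * 4 == 64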
| 124
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest (TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<s>' )
        self.assertEqual(vocab_keys[1] , '<pad>' )
        self.assertEqual(vocab_keys[-1] , '<mask>' )
        self.assertEqual(len(vocab_keys ) , 1002 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1002 )
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Any = XLMRobertaTokenizer(A , keep_accents=A )
snake_case : Tuple = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
snake_case : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
snake_case : str = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(
A , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
snake_case : Any = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def UpperCAmelCase ( self ) -> Optional[Any]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
snake_case : List[Any] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case : Dict = self.rust_tokenizer_class.from_pretrained(A , **A )
snake_case : int = self.tokenizer_class.from_pretrained(A , **A )
snake_case : Optional[int] = tempfile.mkdtemp()
snake_case : List[Any] = tokenizer_r.save_pretrained(A )
snake_case : Optional[Any] = tokenizer_p.save_pretrained(A )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
snake_case : Union[str, Any] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
snake_case : str = tokenizer_r.from_pretrained(A )
snake_case : Dict = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=True
snake_case : List[Any] = tempfile.mkdtemp()
snake_case : Tuple = tokenizer_r.save_pretrained(A , legacy_format=A )
snake_case : Any = tokenizer_p.save_pretrained(A )
# Checks it save with the same files
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
snake_case : int = tokenizer_r.from_pretrained(A )
snake_case : Any = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=False
snake_case : Tuple = tempfile.mkdtemp()
snake_case : str = tokenizer_r.save_pretrained(A , legacy_format=A )
snake_case : Optional[int] = tokenizer_p.save_pretrained(A )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case : Dict = tokenizer_r.from_pretrained(A )
snake_case : List[Any] = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
@cached_property
def UpperCAmelCase ( self ) -> str:
return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""" )
def UpperCAmelCase ( self ) -> Optional[int]:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(A , f.name )
snake_case : Any = XLMRobertaTokenizer(f.name , keep_accents=A )
snake_case : List[Any] = pickle.dumps(A )
pickle.loads(A )
def UpperCAmelCase ( self ) -> Any:
if not self.test_rust_tokenizer:
return
snake_case : str = self.get_tokenizer()
snake_case : List[str] = self.get_rust_tokenizer()
snake_case : str = """I was born in 92000, and this is falsé."""
snake_case : Optional[int] = tokenizer.tokenize(A )
snake_case : List[Any] = rust_tokenizer.tokenize(A )
self.assertListEqual(A , A )
snake_case : List[Any] = tokenizer.encode(A , add_special_tokens=A )
snake_case : Optional[int] = rust_tokenizer.encode(A , add_special_tokens=A )
self.assertListEqual(A , A )
snake_case : Optional[Any] = self.get_rust_tokenizer()
snake_case : str = tokenizer.encode(A )
snake_case : Dict = rust_tokenizer.encode(A )
self.assertListEqual(A , A )
@slow
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : Dict = """Hello World!"""
snake_case : Union[str, Any] = [0, 3_5_3_7_8, 6_6_6_1, 3_8, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(A , self.big_tokenizer.encode(A ) )
@slow
def UpperCAmelCase ( self ) -> str:
snake_case : int = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
snake_case : Tuple = [
0,
3_2_9_3,
8_3,
1_0,
4_5_5_2,
4_9_8_9,
7_9_8_6,
6_7_8,
1_0,
5_9_1_5,
1_1_1,
1_7_9_4_5_9,
1_2_4_8_5_0,
4,
6_0_4_4,
2_3_7,
1_2,
6,
5,
6,
4,
6_7_8_0,
7_0_5,
1_5,
1_3_8_8,
4_4,
3_7_8,
1_0_1_1_4,
7_1_1,
1_5_2,
2_0,
6,
5,
2_2_3_7_6,
6_4_2,
1_2_2_1,
1_5_1_9_0,
3_4_1_5_3,
4_5_0,
5_6_0_8,
9_5_9,
1_1_1_9,
5_7_7_0_2,
1_3_6,
1_8_6,
4_7,
1_0_9_8,
2_9_3_6_7,
4_7,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_0_4_4,
2_3_7,
6_2_8_4,
5_0_9_0_1,
5_2_8,
3_1,
9_0,
3_4,
9_2_7,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(A , self.big_tokenizer.encode(A ) )
@slow
def UpperCAmelCase ( self ) -> str:
# fmt: off
snake_case : Tuple = {"""input_ids""": [[0, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [0, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
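# For context: the fairseq_offset arithmetic exercised above exists because fairseq
# prepends an extra special token to the raw SentencePiece vocabulary, so every
# SentencePiece id is shifted by tokenizer.fairseq_offset (1 for XLM-R). A sketch of
# the mapping (illustrative ids taken from the full-tokenizer test above):
def _demo_apply_fairseq_offset(spm_ids , offset=1 ):
    return [i + offset for i in spm_ids] # e.g. [285, 46, 10, 170, 382] -> [286, 47, 11, 171, 383]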
| 124
| 1
|
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    """simple docstring"""
    url = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
    image = Image.open(requests.get(url , stream=True ).raw ).convert('RGB' )
    return image
def create_rename_keys(config ):
    """simple docstring"""
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias(state_dict , config ):
    """simple docstring"""
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[f'''visual_encoder.blocks.{i}.attn.qkv.bias'''] = qkv_bias
def get_blipa_config(model_name , eos_token_id=None ):
    """simple docstring"""
    image_size = 364 if 'coco' in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
"""simple docstring"""
lowerCamelCase__ : int =(
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
lowerCamelCase__ : str =tokenizer('''\n''' , add_special_tokens=snake_case_ ).input_ids[0]
lowerCamelCase__ , lowerCamelCase__ : Dict =get_blipa_config(snake_case_ , eos_token_id=snake_case_ )
lowerCamelCase__ : Dict =BlipaForConditionalGeneration(snake_case_ ).eval()
lowerCamelCase__ : str ={
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
lowerCamelCase__ , lowerCamelCase__ : Tuple =model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
lowerCamelCase__ : List[Any] ='''cuda''' if torch.cuda.is_available() else '''cpu'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] =load_model_and_preprocess(
name=snake_case_ , model_type=snake_case_ , is_eval=snake_case_ , device=snake_case_ )
original_model.eval()
print('''Done!''' )
# update state dict keys
lowerCamelCase__ : Optional[Any] =original_model.state_dict()
lowerCamelCase__ : Tuple =create_rename_keys(snake_case_ )
for src, dest in rename_keys:
rename_key(snake_case_ , snake_case_ , snake_case_ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
lowerCamelCase__ : Optional[int] =state_dict.pop(snake_case_ )
if key.startswith('''Qformer.bert''' ):
lowerCamelCase__ : Optional[int] =key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
lowerCamelCase__ : Any =key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
lowerCamelCase__ : int =key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
lowerCamelCase__ : List[str] =key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
lowerCamelCase__ : Dict =key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
lowerCamelCase__ : List[str] =key.replace('''t5''' , '''language''' )
lowerCamelCase__ : int =val
# read in qv biases
read_in_q_v_bias(snake_case_ , snake_case_ )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =hf_model.load_state_dict(snake_case_ , strict=snake_case_ )
assert len(snake_case_ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
lowerCamelCase__ : List[str] =load_demo_image()
lowerCamelCase__ : str =vis_processors['''eval'''](snake_case_ ).unsqueeze(0 ).to(snake_case_ )
lowerCamelCase__ : Any =tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(snake_case_ )
# create processor
lowerCamelCase__ : Union[str, Any] =BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=snake_case_ , image_std=snake_case_ )
lowerCamelCase__ : str =BlipaProcessor(image_processor=snake_case_ , tokenizer=snake_case_ )
lowerCamelCase__ : Union[str, Any] =processor(images=snake_case_ , return_tensors='''pt''' ).pixel_values.to(snake_case_ )
# make sure processor creates exact same pixel values
assert torch.allclose(snake_case_ , snake_case_ )
original_model.to(snake_case_ )
hf_model.to(snake_case_ )
with torch.no_grad():
if "opt" in model_name:
lowerCamelCase__ : str =original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
lowerCamelCase__ : Any =hf_model(snake_case_ , snake_case_ ).logits
else:
lowerCamelCase__ : Union[str, Any] =original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
lowerCamelCase__ : Union[str, Any] =input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
lowerCamelCase__ : str =hf_model(snake_case_ , snake_case_ , labels=snake_case_ ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
lowerCamelCase__ : str =torch.tensor(
[[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=snake_case_ )
assert torch.allclose(logits[0, :3, :3] , snake_case_ , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
lowerCamelCase__ : Union[str, Any] =torch.tensor(
[[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=snake_case_ )
else:
# cast to same type
lowerCamelCase__ : str =logits.dtype
assert torch.allclose(original_logits.to(snake_case_ ) , snake_case_ , atol=1e-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
lowerCamelCase__ : str =''''''
lowerCamelCase__ : Tuple =tokenizer(snake_case_ , return_tensors='''pt''' ).input_ids.to(snake_case_ )
lowerCamelCase__ : Tuple =original_model.generate({'''image''': original_pixel_values} )
lowerCamelCase__ : Any =hf_model.generate(
snake_case_ , snake_case_ , do_sample=snake_case_ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , snake_case_ )
lowerCamelCase__ : Dict =input_ids.shape[1]
lowerCamelCase__ : int =processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=snake_case_ )
lowerCamelCase__ : Any =[text.strip() for text in output_text]
print('''HF generation:''' , snake_case_ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(snake_case_ )
hf_model.save_pretrained(snake_case_ )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
_lowercase : Any = argparse.ArgumentParser()
_lowercase : Optional[Any] = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
_lowercase : List[str] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
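# For reference, the state-dict surgery performed above reduces to this generic pattern
# (a sketch; `rename_pairs` stands in for the (old, new) tuples built by create_rename_keys):
def _demo_rename_state_dict(state_dict , rename_pairs ):
    for old, new in rename_pairs:
        state_dict[new] = state_dict.pop(old )
    return state_dict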
| 368
|
"""simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings( timesteps: jnp.ndarray , embedding_dim: int , freq_shift: float = 1 , min_timescale: float = 1 , max_timescale: float = 1.0e4 , flip_sin_to_cos: bool = False , scale: float = 1.0 , ):
    """simple docstring"""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f'''Embedding dimension {embedding_dim} should be even'''
    num_timescales = float(embedding_dim // 2 )
    log_timescale_increment = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales , dtype=jnp.float32 ) * -log_timescale_increment )
    emb = jnp.expand_dims(timesteps , 1 ) * jnp.expand_dims(inv_timescales , 0 )
    # scale embeddings
    scaled_time = scale * emb
    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time ), jnp.sin(scaled_time )] , axis=1 )
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time ), jnp.cos(scaled_time )] , axis=1 )
    signal = jnp.reshape(signal , [jnp.shape(timesteps )[0], embedding_dim] )
    return signal
class FlaxTimestepEmbedding ( nn.Module ):
    '''simple docstring'''
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32
@nn.compact
    def __call__( self , temb ):
        temb = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_1' )(temb )
        temb = nn.silu(temb )
        temb = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_2' )(temb )
        return temb
class FlaxTimesteps ( nn.Module ):
    '''simple docstring'''
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1
@nn.compact
    def __call__( self , timesteps ):
        return get_sinusoidal_embeddings(
            timesteps , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
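# A quick shape check for the helper above (a sketch; any 1-d array of timesteps works):
if __name__ == "__main__":
    _demo_emb = get_sinusoidal_embeddings(jnp.arange(4 , dtype=jnp.float32 ) , embedding_dim=8 )
    print(_demo_emb.shape ) # (4, 8): sin half then cos half along the last axis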
| 272
| 0
|
'''simple docstring'''
from math import isqrt, log2
def calculate_prime_numbers(max_number: int ) -> list[int]:
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
def solution(base: int = 800_800 , degree: int = 800_800 ) -> int:
    upper_bound = degree * log2(base )
    max_prime = int(upper_bound )
    prime_numbers = calculate_prime_numbers(max_prime )
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left] )
            + prime_numbers[left] * log2(prime_numbers[right] )
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
print(f'''{solution() = }''')
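# Sanity check of the logarithmic identity the two-pointer search relies on:
# p**q * q**p <= base**degree  iff  q * log2(p) + p * log2(q) <= degree * log2(base).
if __name__ == "__main__":
    # e.g. 2**3 * 3**2 = 72 <= 2**10 = 1024, and 3 * log2(2) + 2 * log2(3) ~= 6.17 <= 10
    assert (2**3 * 3**2 <= 2**10) == (3 * log2(2 ) + 2 * log2(3 ) <= 10 )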
| 304
|
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences , padding_value , padding_side , sequence_length ):
    if isinstance(padding_value , tuple ):
        out_tensor = np.full((len(sequences ), sequence_length, 2) , padding_value )
    else:
        out_tensor = np.full((len(sequences ), sequence_length) , padding_value )
    for i, tensor in enumerate(sequences ):
        if padding_side == "right":
            if isinstance(padding_value , tuple ):
                out_tensor[i, : len(tensor[:sequence_length] )] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length] )] = tensor[:sequence_length]
        else:
            if isinstance(padding_value , tuple ):
                out_tensor[i, sequence_length - len(tensor[:sequence_length] ) :] = tensor[:sequence_length]
            else:
                out_tensor[i, sequence_length - len(tensor[:sequence_length] ) :] = tensor[:sequence_length]
    return out_tensor.tolist()
def is_punctuation(char: str ) -> bool:
    cp = ord(char )
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char )
    if cat.startswith('P' ):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification ( DataCollatorMixin ):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"
    def torch_call( self , features ):
        import torch
        label_name = 'label' if 'label' in features[0].keys() else 'labels'
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' if labels is None else None , )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch['entity_ids'] ).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label ) + [self.label_pad_token_id] * (sequence_length - len(label )) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label )) + list(label ) for label in labels
            ]
        ner_tags = [feature['ner_tags'] for feature in features]
        batch['ner_tags'] = padding_tensor(ner_tags , -1 , padding_side , sequence_length )
        original_entity_spans = [feature['original_entity_spans'] for feature in features]
        batch['original_entity_spans'] = padding_tensor(original_entity_spans , (-1, -1) , padding_side , sequence_length )
        batch = {k: torch.tensor(v , dtype=torch.int64 ) for k, v in batch.items()}
        return batch
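# A minimal sketch of what padding_tensor does (made-up inputs for illustration):
if __name__ == "__main__":
    # right-pad two ragged label rows to length 4 with -1
    print(padding_tensor([[1, 2], [3]] , -1 , 'right' , 4 ) ) # [[1, 2, -1, -1], [3, -1, -1, -1]]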
| 304
| 1
|
def heaps(arr: list ) -> list:
    """simple docstring"""
    if len(arr ) <= 1:
        return [tuple(arr )]
    res = []
    def generate(n: int , arr: list ):
        c = [0] * n
        res.append(tuple(arr ) )
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0] , arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]] , arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr ) )
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1
    generate(len(arr ) , arr )
    return res
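# For reference, Heap's algorithm emits each successive permutation after a single swap:
# heaps([1, 2, 3]) == [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]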
if __name__ == "__main__":
_A = input("Enter numbers separated by a comma:\n").strip()
_A = [int(item) for item in user_input.split(",")]
print(heaps(arr))
| 137
|
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig ( PretrainedConfig ):
    model_type = "segformer"
    def __init__( self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[32, 64, 160, 256] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , classifier_dropout_prob=0.1 , initializer_range=0.02 , drop_path_rate=0.1 , layer_norm_eps=1E-6 , decoder_hidden_size=256 , semantic_loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs )
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True." , FutureWarning , )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage" , True )
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig ( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
    @property
    def atol_for_validation( self ) -> float:
        return 1E-4
    @property
    def default_onnx_opset( self ) -> int:
        return 12
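# A minimal instantiation sketch (an illustration; the defaults above match the
# SegFormer-B0 encoder):
if __name__ == "__main__":
    _demo_config = SegformerConfig()
    print(_demo_config.hidden_sizes ) # [32, 64, 160, 256]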
| 137
| 1
|
from __future__ import annotations
def get_valid_pos(position: tuple[int, int] , n: int ) -> list[tuple[int, int]]:
    y , x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test , x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position )
    return permissible_positions
def is_complete(board: list[list[int]] ) -> bool:
    return not any(elem == 0 for row in board for elem in row )
def open_knight_tour_helper(board: list[list[int]] , pos: tuple[int, int] , curr: int ) -> bool:
    if is_complete(board ):
        return True
    for position in get_valid_pos(pos , len(board ) ):
        y , x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board , position , curr + 1 ):
                return True
            board[y][x] = 0
    return False
def open_knight_tour(n: int ) -> list[list[int]]:
    board = [[0 for i in range(n )] for j in range(n )]
    for i in range(n ):
        for j in range(n ):
            board[i][j] = 1
            if open_knight_tour_helper(board , (i, j) , 1 ):
                return board
            board[i][j] = 0
    msg = F"""Open Knight Tour cannot be performed on a board of size {n}"""
    raise ValueError(msg )
if __name__ == "__main__":
import doctest
doctest.testmod()
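# Usage note: the smallest square board larger than 1x1 that admits an open knight's tour
# is 5x5; open_knight_tour(5) returns a board whose entries 1..25 trace the tour, while
# n in {2, 3, 4} exhausts every starting square and raises the ValueError above.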
| 50
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNetaDOutput ( BaseOutput ):
    sample: torch.FloatTensor
class UNetaDModel ( ModelMixin , ConfigMixin ):
    @register_to_config
def __init__( self : List[str] , UpperCAmelCase : int = 65536 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 2 , UpperCAmelCase : int = 2 , UpperCAmelCase : int = 0 , UpperCAmelCase : str = "fourier" , UpperCAmelCase : bool = True , UpperCAmelCase : bool = False , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , UpperCAmelCase : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , UpperCAmelCase : Tuple[str] = "UNetMidBlock1D" , UpperCAmelCase : str = None , UpperCAmelCase : Tuple[int] = (32, 32, 64) , UpperCAmelCase : str = None , UpperCAmelCase : int = 8 , UpperCAmelCase : int = 1 , UpperCAmelCase : bool = False , ) -> List[Any]:
super().__init__()
lowerCamelCase__ : Optional[int] = sample_size
# time
if time_embedding_type == "fourier":
lowerCamelCase__ : Optional[Any] = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=UpperCAmelCase , log=UpperCAmelCase , flip_sin_to_cos=UpperCAmelCase )
lowerCamelCase__ : Any = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
lowerCamelCase__ : List[Any] = Timesteps(
block_out_channels[0] , flip_sin_to_cos=UpperCAmelCase , downscale_freq_shift=UpperCAmelCase )
lowerCamelCase__ : Dict = block_out_channels[0]
if use_timestep_embedding:
lowerCamelCase__ : str = block_out_channels[0] * 4
lowerCamelCase__ : List[Any] = TimestepEmbedding(
in_channels=UpperCAmelCase , time_embed_dim=UpperCAmelCase , act_fn=UpperCAmelCase , out_dim=block_out_channels[0] , )
lowerCamelCase__ : Any = nn.ModuleList([] )
lowerCamelCase__ : Tuple = None
lowerCamelCase__ : List[str] = nn.ModuleList([] )
lowerCamelCase__ : Optional[int] = None
# down
lowerCamelCase__ : Optional[int] = in_channels
for i, down_block_type in enumerate(UpperCAmelCase ):
lowerCamelCase__ : Union[str, Any] = output_channel
lowerCamelCase__ : Tuple = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
lowerCamelCase__ : Union[str, Any] = i == len(UpperCAmelCase ) - 1
lowerCamelCase__ : Optional[int] = get_down_block(
UpperCAmelCase , num_layers=UpperCAmelCase , in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(UpperCAmelCase )
# mid
lowerCamelCase__ : Optional[int] = get_mid_block(
UpperCAmelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=UpperCAmelCase , add_downsample=UpperCAmelCase , )
# up
lowerCamelCase__ : Optional[int] = list(reversed(UpperCAmelCase ) )
lowerCamelCase__ : Optional[int] = reversed_block_out_channels[0]
if out_block_type is None:
lowerCamelCase__ : List[str] = out_channels
else:
lowerCamelCase__ : Any = block_out_channels[0]
for i, up_block_type in enumerate(UpperCAmelCase ):
lowerCamelCase__ : Tuple = output_channel
lowerCamelCase__ : Union[str, Any] = (
reversed_block_out_channels[i + 1] if i < len(UpperCAmelCase ) - 1 else final_upsample_channels
)
lowerCamelCase__ : List[str] = i == len(UpperCAmelCase ) - 1
lowerCamelCase__ : Dict = get_up_block(
UpperCAmelCase , num_layers=UpperCAmelCase , in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(UpperCAmelCase )
lowerCamelCase__ : int = output_channel
# out
lowerCamelCase__ : int = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
lowerCamelCase__ : List[Any] = get_out_block(
out_block_type=UpperCAmelCase , num_groups_out=UpperCAmelCase , embed_dim=block_out_channels[0] , out_channels=UpperCAmelCase , act_fn=UpperCAmelCase , fc_dim=block_out_channels[-1] // 4 , )
    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
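# Minimal usage sketch, assuming the standard `diffusers` API this module implements
# (kept as comments so the module stays importable; sizes are illustrative and only
# need to be divisible by the model's total downsampling factor):
#
#   import torch
#   from diffusers import UNet1DModel
#
#   unet = UNet1DModel(in_channels=2, out_channels=2)
#   sample = torch.randn(1, 2, 2048)          # (batch, channels, length)
#   out = unet(sample, timestep=10).sample    # same shape as `sample`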
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    """Configuration for a BERT-style encoder model."""

    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
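# Minimal usage sketch, assuming this module sits in the usual `transformers`
# package layout so the classes above are importable:
#
#   config = BertConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
#   onnx_config = BertOnnxConfig(config)
#   print(onnx_config.inputs)  # OrderedDict of dynamic axes for the three model inputs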
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests: a tokenizer without a padding token must raise
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Run the same tests with ftfy and spacy available (different normalization path)."""

    pass
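# Worked illustration of the toy merge table above (a sketch, not part of the tests):
# "lower" is first split into characters "l o w e r</w>", then merges apply in rank order:
#   "l o"     -> "lo w e r</w>"
#   "lo w"    -> "low e r</w>"
#   "e r</w>" -> "low er</w>"
# which yields ["low", "er</w>"], the expectation checked in test_full_tokenizer.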
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")
    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """Copy/paste/tweak the OpenAI checkpoint weights to our design."""
    for file in MODEL_MAPPING[model_name.split("/")[-1]]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
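# Usage sketch, assuming the script is saved as convert_jukebox.py (the filename is
# not fixed by the module itself):
#
#   python convert_jukebox.py --model_name jukebox-1b-lyrics \
#       --pytorch_dump_folder_path jukebox-1b-lyrics-converted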
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not trusted, loading this dynamic feature extractor must fail.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
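# Minimal sketch of the auto-API under test, assuming network access to the Hub:
#
#   import numpy as np
#   from transformers import AutoFeatureExtractor
#
#   fe = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#   inputs = fe(np.zeros(16000, dtype=np.float32), sampling_rate=16000, return_tensors="pt")
#   print(inputs.input_values.shape)  # torch.Size([1, 16000])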
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
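# Minimal sketch of the encode/decode round trip exercised above, assuming the same
# "fusing/vqgan-dummy" checkpoint:
#
#   model = VQModel.from_pretrained("fusing/vqgan-dummy")
#   image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
#   latents = model.encode(image).latents   # continuous latents before quantization
#   recon = model.decode(latents).sample    # quantized and decoded reconstruction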
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """Removes segments. Positive values shave the first segments, negative shave the last segments."""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint(checkpoint, config):
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config)

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
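# Usage sketch, assuming the script is saved as convert_ldm_original_checkpoint_to_diffusers.py
# (the filename is not fixed by the module itself):
#
#   python convert_ldm_original_checkpoint_to_diffusers.py \
#       --checkpoint_path model.ckpt --config_file config.json --dump_path converted/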
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def lowercase ( _snake_case : str , _snake_case : Any , _snake_case : Optional[Any] , _snake_case : str=5 ) ->Tuple:
"""simple docstring"""
assert masked_input.count('''<mask>''' ) == 1
__snake_case : Dict = torch.tensor(tokenizer.encode(_snake_case , add_special_tokens=_snake_case ) ).unsqueeze(0 ) # Batch size 1
__snake_case : Optional[int] = model(_snake_case )[0] # The last hidden-state is the first element of the output tuple
__snake_case : Union[str, Any] = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
__snake_case : Tuple = logits[0, masked_index, :]
__snake_case : Any = logits.softmax(dim=0 )
__snake_case , __snake_case : List[Any] = prob.topk(k=_snake_case , dim=0 )
__snake_case : List[str] = ''' '''.join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(_snake_case ) )] )
__snake_case : int = tokenizer.mask_token
__snake_case : Optional[int] = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(''' ''' ) ):
__snake_case : str = predicted_token_bpe.replace('''\u2581''' , ''' ''' )
if " {0}".format(_snake_case ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(''' {0}'''.format(_snake_case ) , _snake_case ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(_snake_case , _snake_case ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
SCREAMING_SNAKE_CASE : str = CamembertTokenizer.from_pretrained("""camembert-base""")
SCREAMING_SNAKE_CASE : int = CamembertForMaskedLM.from_pretrained("""camembert-base""")
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = """Le camembert est <mask> :)"""
print(fill_mask(masked_input, model, tokenizer, topk=3))
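# Equivalent high-level alternative (a sketch, not what this script does internally):
# the fill-mask pipeline wraps the same tokenize/softmax/top-k steps.
#
#   from transformers import pipeline
#   camembert_fill_mask = pipeline("fill-mask", model="camembert-base")
#   results = camembert_fill_mask("Le camembert est <mask> :)")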
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
_KWARGS_DESCRIPTION = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** ('warn', 0 or 1): Sets the value to return when there is a zero division. Defaults to 'warn'.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
_CITATION = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn"
    ):
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
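# Worked check of Example 1 above: with references=[0, 0, 1, 1, 1] and
# predictions=[0, 1, 0, 1, 1], the positives are the three 1s in `references`;
# two are predicted correctly (TP = 2) and one is missed (FN = 1), so
# recall = TP / (TP + FN) = 2 / 3 = 0.6666..., matching the printed result.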
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implemented only because PyTorch Lightning requires it
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--longformer_model',
default=None,
type=str,
required=True,
help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
)
parser.add_argument(
'--longformer_question_answering_ckpt_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch Lightning Checkpoint.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
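# Usage sketch, assuming the script is saved as
# convert_longformer_original_pytorch_lightning_to_pytorch.py (the filename is not
# fixed by the module itself):
#
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path checkpoint.ckpt \
#       --pytorch_dump_folder_path converted/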
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # Reduce the amount of console output from TF
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class _SCREAMING_SNAKE_CASE ( _a ):
pass
class _SCREAMING_SNAKE_CASE ( _a ):
snake_case__ : Union[str, Any] = 0
snake_case__ : Union[str, Any] = 1
snake_case__ : List[str] = 2
@contextmanager
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Dict=OfflineSimulationMode.CONNECTION_FAILS , __magic_name__ : str=1E-16 ) -> Any:
"""simple docstring"""
UpperCamelCase :Optional[int] = requests.Session().request
def timeout_request(__magic_name__ : str , __magic_name__ : List[str] , __magic_name__ : Optional[int] , **__magic_name__ : Dict ):
# Change the url to an invalid url so that the connection hangs
UpperCamelCase :Dict = """https://10.255.255.1"""
if kwargs.get("""timeout""" ) is None:
raise RequestWouldHangIndefinitelyError(
f"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
UpperCamelCase :List[str] = timeout
try:
return online_request(__magic_name__ , __magic_name__ , **__magic_name__ )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
UpperCamelCase :Any = url
UpperCamelCase :Any = e.args[0]
UpperCamelCase :Tuple = (max_retry_error.args[0].replace("""10.255.255.1""" , f"""OfflineMock[{url}]""" ),)
UpperCamelCase :List[str] = (max_retry_error,)
raise
def raise_connection_error(__magic_name__ : Tuple , __magic_name__ : int , **__magic_name__ : str ):
raise requests.ConnectionError("""Offline mode is enabled.""" , request=__magic_name__ )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("""requests.Session.send""" , __magic_name__ ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("""requests.Session.request""" , __magic_name__ ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("""datasets.config.HF_DATASETS_OFFLINE""" , __magic_name__ ):
yield
else:
raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" )
@contextmanager
def SCREAMING_SNAKE_CASE_ ( *__magic_name__ : str , **__magic_name__ : Dict ) -> List[Any]:
"""simple docstring"""
UpperCamelCase :Optional[int] = str(Path().resolve() )
with tempfile.TemporaryDirectory(*__magic_name__ , **__magic_name__ ) as tmp_dir:
try:
os.chdir(__magic_name__ )
yield
finally:
os.chdir(__magic_name__ )
@contextmanager
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]:
"""simple docstring"""
import gc
gc.collect()
UpperCamelCase :Tuple = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[Any]:
"""simple docstring"""
import gc
gc.collect()
UpperCamelCase :Dict = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Dict , __magic_name__ : Dict ) -> Tuple:
"""simple docstring"""
return deepcopy(__magic_name__ ).integers(0 , 100 , 10 ).tolist() == deepcopy(__magic_name__ ).integers(0 , 100 , 10 ).tolist()
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(__magic_name__ : Tuple , *__magic_name__ : List[str] , **__magic_name__ : List[Any] ):
try:
return func(*__magic_name__ , **__magic_name__ )
except HTTPError as err:
if str(__magic_name__ ).startswith("""500""" ) or str(__magic_name__ ).startswith("""502""" ):
pytest.xfail(str(__magic_name__ ) )
raise err
return decorator.decorator(_wrapper , __magic_name__ )
class _SCREAMING_SNAKE_CASE :
def __init__( self : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any ):
UpperCamelCase :int = returncode
UpperCamelCase :Tuple = stdout
UpperCamelCase :List[str] = stderr
async def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Dict , __magic_name__ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
while True:
UpperCamelCase :List[str] = await stream.readline()
if line:
callback(__magic_name__ )
else:
break
async def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Tuple , __magic_name__ : List[str]=None , __magic_name__ : Optional[int]=None , __magic_name__ : int=None , __magic_name__ : int=False , __magic_name__ : str=False ) -> _RunOutput:
"""simple docstring"""
if echo:
print("""\nRunning: """ , """ """.join(__magic_name__ ) )
UpperCamelCase :List[str] = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__magic_name__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__magic_name__ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
UpperCamelCase :List[Any] = []
UpperCamelCase :List[Any] = []
def tee(__magic_name__ : List[str] , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : Optional[Any]="" ):
UpperCamelCase :List[Any] = line.decode("""utf-8""" ).rstrip()
sink.append(__magic_name__ )
if not quiet:
print(__magic_name__ , __magic_name__ , file=__magic_name__ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda __magic_name__ : tee(__magic_name__ , __magic_name__ , sys.stdout , label="""stdout:""" ) ),
_read_stream(p.stderr , lambda __magic_name__ : tee(__magic_name__ , __magic_name__ , sys.stderr , label="""stderr:""" ) ),
] , timeout=__magic_name__ , )
return _RunOutput(await p.wait() , __magic_name__ , __magic_name__ )
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int , __magic_name__ : List[Any]=None , __magic_name__ : Optional[Any]=None , __magic_name__ : List[str]=180 , __magic_name__ : Optional[Any]=False , __magic_name__ : Any=True ) -> _RunOutput:
"""simple docstring"""
UpperCamelCase :str = asyncio.get_event_loop()
UpperCamelCase :Optional[int] = loop.run_until_complete(
_stream_subprocess(__magic_name__ , env=__magic_name__ , stdin=__magic_name__ , timeout=__magic_name__ , quiet=__magic_name__ , echo=__magic_name__ ) )
UpperCamelCase :Tuple = """ """.join(__magic_name__ )
if result.returncode > 0:
UpperCamelCase :int = """\n""".join(result.stderr )
raise RuntimeError(
f"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
f"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"""'{cmd_str}' produced no output.""" )
return result
def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase :int = os.environ.get("""PYTEST_XDIST_WORKER""" , """gw0""" )
UpperCamelCase :str = re.sub(R"""^gw""" , """""" , __magic_name__ , 0 , re.M )
return int(__magic_name__ )
def SCREAMING_SNAKE_CASE_ ( ) -> Any:
"""simple docstring"""
UpperCamelCase :Union[str, Any] = 2_9500
UpperCamelCase :Union[str, Any] = pytest_xdist_worker_id()
return port + uniq_delta
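# --- Illustrative sketch (added for clarity; not part of the original module). ---
# How the two helpers above interact, assuming pytest-xdist names its workers
# "gw0", "gw1", ...: each worker derives a unique torch.distributed port by adding
# its numeric worker id to a fixed base port. `_demo_worker_port` is a hypothetical
# stand-in that re-implements the same arithmetic.
import re as _demo_re
def _demo_worker_port(worker_name: str , base_port: int = 2_9500 ) -> int:
    worker_id = int(_demo_re.sub(r"""^gw""" , """""" , worker_name ) )
    return base_port + worker_id
assert _demo_worker_port("""gw0""" ) == 2_9500
assert _demo_worker_port("""gw3""" ) == 2_9503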
| 38
|
from math import sqrt
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> bool:
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (
number >= 0
), "'number' must been an int and positive"
lowercase : Union[str, Any] = True
    # 0 and 1 are not primes.
if number <= 1:
lowercase : str = False
for divisor in range(2 , int(round(sqrt(SCREAMING_SNAKE_CASE__ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
lowercase : Any = False
break
# precondition
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "'status' must been from type bool"
return status
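# --- Illustrative sketch (added for clarity; not part of the original file). ---
# Trial division only needs to test divisors up to sqrt(number): if number = a * b
# with a <= b, then a <= sqrt(number). `_demo_is_prime` is a hypothetical clean
# re-statement of the check above, with a few spot checks.
from math import isqrt as _demo_isqrt
def _demo_is_prime(number: int ) -> bool:
    if number <= 1:
        return False
    return all(number % divisor != 0 for divisor in range(2 , _demo_isqrt(number ) + 1 ) )
assert [n for n in range(20 ) if _demo_is_prime(n )] == [2, 3, 5, 7, 11, 13, 17, 19]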
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowercase : str = list(range(2 , n + 1 ) )
    lowercase : Tuple = []  # this list will be returned.
    # actual sieve of Eratosthenes
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
for j in range(i + 1 , len(SCREAMING_SNAKE_CASE__ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowercase : Tuple = 0
# filters actual prime numbers.
lowercase : int = [x for x in begin_list if x != 0]
# precondition
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "'ans' must been from type list"
return ans
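# --- Illustrative sketch (added for clarity; not part of the original file). ---
# The same sieve of Eratosthenes expressed with a boolean mark array, which avoids
# the quadratic inner scans of the version above. `_demo_sieve` is a hypothetical
# name for this sketch.
def _demo_sieve(n: int ) -> list:
    is_composite = [False] * (n + 1 )
    primes = []
    for candidate in range(2 , n + 1 ):
        if not is_composite[candidate]:
            primes.append(candidate )
            for multiple in range(candidate * candidate , n + 1 , candidate ):
                is_composite[multiple] = True
    return primes
assert _demo_sieve(30 ) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]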
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (n > 2), "'N' must been an int and > 2"
lowercase : Dict = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(SCREAMING_SNAKE_CASE__ ):
ans.append(SCREAMING_SNAKE_CASE__ )
# precondition
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "'ans' must been from type list"
return ans
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Tuple:
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and number >= 0, "'number' must been an int and >= 0"
    lowercase : Tuple = []  # this list will be returned by the function.
# potential prime number factors.
lowercase : Optional[Any] = 2
lowercase : Any = number
if number == 0 or number == 1:
ans.append(SCREAMING_SNAKE_CASE__ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(SCREAMING_SNAKE_CASE__ ):
while quotient != 1:
if is_prime(SCREAMING_SNAKE_CASE__ ) and (quotient % factor == 0):
ans.append(SCREAMING_SNAKE_CASE__ )
quotient /= factor
else:
factor += 1
else:
ans.append(SCREAMING_SNAKE_CASE__ )
# precondition
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "'ans' must been from type list"
return ans
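# --- Illustrative sketch (added for clarity; not part of the original file). ---
# The factorization loop above repeatedly divides the quotient by the smallest
# remaining prime factor. `_demo_prime_factorization` re-states it with integer
# division (hypothetical name), spot-checked on 360 = 2^3 * 3^2 * 5.
def _demo_prime_factorization(number: int ) -> list:
    factors = []
    factor = 2
    quotient = number
    while quotient > 1:
        if quotient % factor == 0:
            factors.append(factor )
            quotient //= factor
        else:
            factor += 1
    return factors
assert _demo_prime_factorization(360 ) == [2, 2, 2, 3, 3, 5]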
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> List[Any]:
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase : Tuple = 0
# prime factorization of 'number'
lowercase : Optional[int] = prime_factorization(SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = max(SCREAMING_SNAKE_CASE__ )
# precondition
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "'ans' must been from type int"
return ans
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> str:
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase : Union[str, Any] = 0
# prime factorization of 'number'
lowercase : Tuple = prime_factorization(SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = min(SCREAMING_SNAKE_CASE__ )
# precondition
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "'ans' must been from type int"
return ans
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , SCREAMING_SNAKE_CASE__ ), "compare must been from type bool"
return number % 2 == 0
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> int:
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , SCREAMING_SNAKE_CASE__ ), "compare must been from type bool"
return number % 2 != 0
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> int:
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (number > 2) and is_even(SCREAMING_SNAKE_CASE__ )
), "'number' must been an int, even and > 2"
    lowercase : Union[str, Any] = []  # this list will be returned
# creates a list of prime numbers between 2 up to 'number'
lowercase : str = get_prime_numbers(SCREAMING_SNAKE_CASE__ )
lowercase : Any = len(SCREAMING_SNAKE_CASE__ )
# run variable for while-loops.
lowercase : Optional[Any] = 0
lowercase : List[Any] = None
    # exit variable, to break out of the loops
lowercase : Any = True
while i < len_pn and loop:
lowercase : str = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowercase : Union[str, Any] = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and (len(SCREAMING_SNAKE_CASE__ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
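# --- Illustrative sketch (added for clarity; not part of the original file). ---
# Goldbach's conjecture: every even number > 2 is the sum of two primes. A
# hypothetical brute-force check mirroring the search above:
def _demo_goldbach(number: int ) -> tuple:
    def _is_prime(k: int ) -> bool:
        return k > 1 and all(k % d for d in range(2 , int(k**0.5 ) + 1 ) )
    for p in range(2 , number ):
        if _is_prime(p ) and _is_prime(number - p ):
            return (p, number - p)
    raise ValueError("no decomposition found" )
assert _demo_goldbach(28 ) == (5, 23)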
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and (numbera >= 0)
        and (numberb >= 0)
    ), "'number1' and 'number2' must been positive integer."
    lowercase : Union[str, Any] = 0
    while numberb != 0:
        lowercase : Optional[int] = numbera % numberb
        lowercase : Optional[int] = numberb
        lowercase : Dict = rest
    # precondition
    assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (
        numbera >= 0
    ), "'number' must been from type int and positive"
    return numbera
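# --- Illustrative sketch (added for clarity; not part of the original file). ---
# The Euclidean algorithm above in its usual compact form (hypothetical name).
# Worked example: gcd(48, 36) -> 48 % 36 = 12, 36 % 12 = 0, so the answer is 12.
def _demo_gcd(a: int , b: int ) -> int:
    while b != 0:
        a, b = b, a % b
    return a
assert _demo_gcd(48 , 36 ) == 12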
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Tuple:
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and (numbera >= 1)
        and (numberb >= 1)
    ), "'number1' and 'number2' must been positive integer."
    lowercase : Dict = 1  # actual answer that will be returned.
    # for kgV (x,1)
    if numbera > 1 and numberb > 1:
        # builds the prime factorization of 'number1' and 'number2'
        lowercase : Optional[Any] = prime_factorization(SCREAMING_SNAKE_CASE__ )
        lowercase : Optional[int] = prime_factorization(SCREAMING_SNAKE_CASE__ )
    elif numbera == 1 or numberb == 1:
        lowercase : Union[str, Any] = []
        lowercase : List[str] = []
        lowercase : Dict = max(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    lowercase : Union[str, Any] = 0
    lowercase : Optional[Any] = 0
    lowercase : List[str] = []  # captured numbers in both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_a:
        if n not in done:
            if n in prime_fac_b:
                lowercase : Dict = prime_fac_a.count(SCREAMING_SNAKE_CASE__ )
                lowercase : Optional[Any] = prime_fac_b.count(SCREAMING_SNAKE_CASE__ )
                for _ in range(max(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ):
                    ans *= n
            else:
                lowercase : List[Any] = prime_fac_a.count(SCREAMING_SNAKE_CASE__ )
                for _ in range(SCREAMING_SNAKE_CASE__ ):
                    ans *= n
            done.append(SCREAMING_SNAKE_CASE__ )
    # iterates through primeFac2
    for n in prime_fac_b:
        if n not in done:
            lowercase : Optional[int] = prime_fac_b.count(SCREAMING_SNAKE_CASE__ )
            for _ in range(SCREAMING_SNAKE_CASE__ ):
                ans *= n
            done.append(SCREAMING_SNAKE_CASE__ )
done.append(SCREAMING_SNAKE_CASE__ )
# precondition
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
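# --- Illustrative sketch (added for clarity; not part of the original file). ---
# The least common multiple can also be computed directly from the gcd via
# lcm(a, b) = |a * b| // gcd(a, b), which avoids building prime factorizations.
# `_demo_lcm` is a hypothetical name for this shortcut.
from math import gcd as _demo_math_gcd
def _demo_lcm(a: int , b: int ) -> int:
    return abs(a * b ) // _demo_math_gcd(a , b )
assert _demo_lcm(4 , 6 ) == 12
assert _demo_lcm(21 , 6 ) == 42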
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Any:
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (n >= 0), "'number' must been a positive int"
lowercase : Dict = 0
lowercase : List[str] = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(SCREAMING_SNAKE_CASE__ ):
ans += 1
# precondition
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and is_prime(
SCREAMING_SNAKE_CASE__ ), "'ans' must been a prime number and from type int"
return ans
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
assert (
        is_prime(SCREAMING_SNAKE_CASE__ ) and is_prime(SCREAMING_SNAKE_CASE__ ) and (p_number_a < p_number_b)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowercase : List[str] = p_number_a + 1 # jump to the next number
lowercase : List[Any] = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(SCREAMING_SNAKE_CASE__ ):
number += 1
    while number < p_number_b:
ans.append(SCREAMING_SNAKE_CASE__ )
number += 1
# fetch the next prime number.
while not is_prime(SCREAMING_SNAKE_CASE__ ):
number += 1
# precondition
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and ans[0] != p_number_a
        and ans[len(SCREAMING_SNAKE_CASE__ ) - 1] != p_number_b
), "'ans' must been a list without the arguments"
    # 'ans' does not contain 'pNumber1' or 'pNumber2'!
return ans
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Any:
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (n >= 1), "'n' must been int and >= 1"
lowercase : Optional[Any] = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(SCREAMING_SNAKE_CASE__ )
# precondition
    assert ans[0] == 1 and ans[len(SCREAMING_SNAKE_CASE__ ) - 1] == n, "Error in function getDivisors(...)"
return ans
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Any:
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (
number > 1
), "'number' must been an int and >= 1"
lowercase : str = get_divisors(SCREAMING_SNAKE_CASE__ )
# precondition
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and (divisors[0] == 1)
and (divisors[len(SCREAMING_SNAKE_CASE__ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Dict:
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowercase : Tuple = gcd(abs(SCREAMING_SNAKE_CASE__ ) , abs(SCREAMING_SNAKE_CASE__ ) )
# precondition
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> int:
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (n >= 0), "'n' must been a int and >= 0"
    lowercase : List[str] = 1  # this will be returned.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (n >= 0), "'n' must been an int and >= 0"
lowercase : int = 0
lowercase : Union[str, Any] = 1
    lowercase : int = 1  # this will be returned
for _ in range(n - 1 ):
lowercase : Optional[int] = ans
ans += fiba
lowercase : Optional[int] = tmp
return ans
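# --- Illustrative sketch (added for clarity; not part of the original file). ---
# The iteration above keeps the last two Fibonacci numbers and rolls them forward;
# with fib(1) = 1 as the base case the first values are 1, 1, 2, 3, 5, 8, ...
# (`_demo_fib` is a hypothetical re-statement).
def _demo_fib(n: int ) -> int:
    previous, current = 0, 1
    for _ in range(n - 1 ):
        previous, current = current, previous + current
    return current
assert [_demo_fib(n ) for n in range(1 , 8 )] == [1, 1, 2, 3, 5, 8, 13]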
| 20
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class __A( __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """vit_msn"""
def __init__(self , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=1E-06 , SCREAMING_SNAKE_CASE_=2_24 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ):
super().__init__(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = qkv_bias
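# --- Illustrative usage sketch (added for clarity; not part of the original file). ---
# Assuming the public `transformers` package (with PyTorch) is installed, a config
# like the one defined above is typically instantiated directly and then used to
# build a randomly initialized model:
if __name__ == "__main__":
    from transformers import ViTMSNConfig, ViTMSNModel
    demo_config = ViTMSNConfig(image_size=2_24 , patch_size=16 )
    demo_model = ViTMSNModel(demo_config )
    print(demo_model.config.hidden_size )  # 768 by default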
| 178
|
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def __magic_name__ ( __a : Any ): # picklable for multiprocessing
'''simple docstring'''
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __magic_name__ ( ):
'''simple docstring'''
with parallel_backend("""spark""" ):
assert ParallelBackendConfig.backend_name == "spark"
UpperCamelCase__ = [1, 2, 3]
with pytest.raises(__a ):
with parallel_backend("""unsupported backend""" ):
map_nested(__a , __a , num_proc=2 )
with pytest.raises(__a ):
with parallel_backend("""unsupported backend""" ):
map_nested(__a , __a , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("""num_proc""" , [2, -1] )
def __magic_name__ ( __a : Optional[int] ):
'''simple docstring'''
UpperCamelCase__ = [1, 2]
UpperCamelCase__ = {"""a""": 1, """b""": 2}
UpperCamelCase__ = {"""a""": [1, 2], """b""": [3, 4]}
UpperCamelCase__ = {"""a""": {"""1""": 1}, """b""": 2}
UpperCamelCase__ = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
UpperCamelCase__ = [2, 3]
UpperCamelCase__ = {"""a""": 2, """b""": 3}
UpperCamelCase__ = {"""a""": [2, 3], """b""": [4, 5]}
UpperCamelCase__ = {"""a""": {"""1""": 2}, """b""": 3}
UpperCamelCase__ = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
with parallel_backend("""spark""" ):
assert map_nested(__a , __a , num_proc=__a ) == expected_map_nested_sa
assert map_nested(__a , __a , num_proc=__a ) == expected_map_nested_sa
assert map_nested(__a , __a , num_proc=__a ) == expected_map_nested_sa
assert map_nested(__a , __a , num_proc=__a ) == expected_map_nested_sa
assert map_nested(__a , __a , num_proc=__a ) == expected_map_nested_sa
| 178
| 1
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
_a = logging.get_logger(__name__)
_a = {
'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = """dpt"""
def __init__( self , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_="gelu" , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1E-1_2 , lowercase_=384 , lowercase_=16 , lowercase_=3 , lowercase_=False , lowercase_=True , lowercase_=[2, 5, 8, 11] , lowercase_="project" , lowercase_=[4, 2, 1, 0.5] , lowercase_=[96, 192, 384, 768] , lowercase_=256 , lowercase_=-1 , lowercase_=False , lowercase_=True , lowercase_=0.4 , lowercase_=255 , lowercase_=0.1 , lowercase_=[1, 1024, 24, 24] , lowercase_=[0, 1] , lowercase_=None , **lowercase_ , ):
"""simple docstring"""
super().__init__(**lowercase_ )
UpperCAmelCase_ : int = hidden_size
UpperCAmelCase_ : Optional[int] = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info("Initializing the config with a `BiT` backbone." )
UpperCAmelCase_ : List[Any] = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
}
UpperCAmelCase_ : Tuple = BitConfig(**lowercase_ )
elif isinstance(lowercase_ , lowercase_ ):
logger.info("Initializing the config with a `BiT` backbone." )
UpperCAmelCase_ : int = BitConfig(**lowercase_ )
elif isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : Union[str, Any] = backbone_config
else:
raise ValueError(
F"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.""" )
UpperCAmelCase_ : List[Any] = backbone_featmap_shape
UpperCAmelCase_ : str = neck_ignore_stages
if readout_type != "project":
raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode." )
else:
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : List[str] = []
UpperCAmelCase_ : int = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : Dict = intermediate_size
UpperCAmelCase_ : List[Any] = hidden_act
UpperCAmelCase_ : Dict = hidden_dropout_prob
UpperCAmelCase_ : Any = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[Any] = initializer_range
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : Optional[int] = image_size
UpperCAmelCase_ : int = patch_size
UpperCAmelCase_ : str = num_channels
UpperCAmelCase_ : Union[str, Any] = qkv_bias
UpperCAmelCase_ : Optional[Any] = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']" )
UpperCAmelCase_ : Any = readout_type
UpperCAmelCase_ : str = reassemble_factors
UpperCAmelCase_ : str = neck_hidden_sizes
UpperCAmelCase_ : Optional[int] = fusion_hidden_size
UpperCAmelCase_ : str = head_in_index
UpperCAmelCase_ : Optional[Any] = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
UpperCAmelCase_ : Optional[Any] = use_auxiliary_head
UpperCAmelCase_ : Any = auxiliary_loss_weight
UpperCAmelCase_ : List[str] = semantic_loss_ignore_index
UpperCAmelCase_ : Union[str, Any] = semantic_classifier_dropout
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
UpperCAmelCase_ : int = self.backbone_config.to_dict()
UpperCAmelCase_ : str = self.__class__.model_type
return output
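# --- Illustrative usage sketch (added for clarity; not part of the original file). ---
# Assuming the public `transformers` package, the hybrid/non-hybrid branches above
# can be exercised by toggling `is_hybrid`; `to_dict` then serializes the config
# (including the nested backbone config, when present) back to a plain dictionary:
if __name__ == "__main__":
    from transformers import DPTConfig
    demo_config = DPTConfig(is_hybrid=False )
    assert demo_config.to_dict()["backbone_config"] is None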
| 61
|
"""simple docstring"""
from __future__ import annotations
import math
def __a ( u, __lowerCamelCase ):
UpperCAmelCase_ : Any = u
for i in range(1, __lowerCamelCase ):
UpperCAmelCase_ : int = temp * (u - i)
return temp
def __a ( ):
UpperCAmelCase_ : str = int(input("enter the numbers of values: " ) )
UpperCAmelCase_ : list[list[float]] = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(j )
            UpperCAmelCase_ : Tuple = 0
    print("enter the values of parameters in a list: " )
    UpperCAmelCase_ : Union[str, Any] = list(map(int, input().split() ) )
    print("enter the values of corresponding parameters: " )
    for i in range(n ):
        UpperCAmelCase_ : int = float(input() )
    UpperCAmelCase_ : Tuple = int(input("enter the value to interpolate: " ) )
    UpperCAmelCase_ : Tuple = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n ):
        for j in range(n - i ):
            UpperCAmelCase_ : Union[str, Any] = y[j + 1][i - 1] - y[j][i - 1]
    UpperCAmelCase_ : Optional[int] = y[0][0]
    for i in range(1, n ):
        summ += (ucal(u, i ) * y[0][i]) / math.factorial(i )
print(f"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
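# --- Illustrative sketch (added for clarity; not part of the original file). ---
# A non-interactive walk-through of the same Newton forward-difference scheme,
# with hypothetical helper names, interpolating f(x) = x**2 on x = 0, 1, 2, 3
# and evaluating at x = 1.5 (exact answer: 2.25):
import math as _demo_math
def _demo_newton_forward(xs: list , ys: list , value: float ) -> float:
    n = len(xs )
    table = [[0.0] * n for _ in range(n )]
    for i in range(n ):
        table[i][0] = ys[i]
    for i in range(1 , n ):
        for j in range(n - i ):
            table[j][i] = table[j + 1][i - 1] - table[j][i - 1]
    u = (value - xs[0]) / (xs[1] - xs[0])
    result = table[0][0]
    u_term = 1.0
    for i in range(1 , n ):
        u_term *= u - (i - 1)
        result += u_term * table[0][i] / _demo_math.factorial(i )
    return result
assert abs(_demo_newton_forward([0, 1, 2, 3] , [0, 1, 4, 9] , 1.5 ) - 2.25 ) < 1E-9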
| 61
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class __UpperCAmelCase :
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=6 , lowerCAmelCase_=17 , lowerCAmelCase_=23 , lowerCAmelCase_=11 , lowerCAmelCase_=True , ):
"""simple docstring"""
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = act_dim
_snake_case = state_dim
_snake_case = hidden_size
_snake_case = max_length
_snake_case = is_training
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
_snake_case = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
_snake_case = floats_tensor((self.batch_size, self.seq_length, 1) )
_snake_case = floats_tensor((self.batch_size, self.seq_length, 1) )
_snake_case = ids_tensor((self.batch_size, self.seq_length) , vocab_size=10_00 )
_snake_case = random_attention_mask((self.batch_size, self.seq_length) )
_snake_case = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def lowerCamelCase ( self ):
"""simple docstring"""
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ):
"""simple docstring"""
_snake_case = DecisionTransformerModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) )  # seq length * 3 as there are 3 modalities: states, returns and actions
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.prepare_config_and_inputs()
(
(
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) ,
) = config_and_inputs
_snake_case = {
'states': states,
'actions': actions,
'rewards': rewards,
'returns_to_go': returns_to_go,
'timesteps': timesteps,
'attention_mask': attention_mask,
}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
__lowercase = (DecisionTransformerModel,) if is_torch_available() else ()
__lowercase = ()
__lowercase = {"""feature-extraction""": DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
__lowercase = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
__lowercase = False
__lowercase = False
__lowercase = False
__lowercase = False
__lowercase = False
__lowercase = False
__lowercase = False
__lowercase = False
__lowercase = False
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = DecisionTransformerModelTester(self )
_snake_case = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def lowerCamelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = DecisionTransformerModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = [
'states',
'actions',
'rewards',
'returns_to_go',
'timesteps',
'attention_mask',
]
self.assertListEqual(arg_names[: len(lowerCAmelCase_ )] , lowerCAmelCase_ )
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = 2 # number of steps of autoregressive prediction we will perform
_snake_case = 10 # defined by the RL environment, may be normalized
_snake_case = DecisionTransformerModel.from_pretrained('edbeeching/decision-transformer-gym-hopper-expert' )
_snake_case = model.to(lowerCAmelCase_ )
_snake_case = model.config
torch.manual_seed(0 )
_snake_case = torch.randn(1 , 1 , config.state_dim ).to(device=lowerCAmelCase_ , dtype=torch.floataa ) # env.reset()
_snake_case = torch.tensor(
[[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]] , device=lowerCAmelCase_ )
_snake_case = torch.tensor(lowerCAmelCase_ , device=lowerCAmelCase_ , dtype=torch.floataa ).reshape(1 , 1 , 1 )
_snake_case = state
_snake_case = torch.zeros(1 , 0 , config.act_dim , device=lowerCAmelCase_ , dtype=torch.floataa )
_snake_case = torch.zeros(1 , 0 , device=lowerCAmelCase_ , dtype=torch.floataa )
_snake_case = torch.tensor(0 , device=lowerCAmelCase_ , dtype=torch.long ).reshape(1 , 1 )
for step in range(lowerCAmelCase_ ):
_snake_case = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=lowerCAmelCase_ )] , dim=1 )
_snake_case = torch.cat([rewards, torch.zeros(1 , 1 , device=lowerCAmelCase_ )] , dim=1 )
_snake_case = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
_snake_case , _snake_case , _snake_case = model(
states=lowerCAmelCase_ , actions=lowerCAmelCase_ , rewards=lowerCAmelCase_ , returns_to_go=lowerCAmelCase_ , timesteps=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) )
_snake_case , _snake_case , _snake_case , _snake_case = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=lowerCAmelCase_ , dtype=torch.floataa ),
1.0,
False,
{},
)
_snake_case = action_pred[0, -1]
_snake_case = torch.cat([states, state] , dim=1 )
_snake_case = returns_to_go[0, -1] - reward
_snake_case = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
_snake_case = torch.cat(
[timesteps, torch.ones((1, 1) , device=lowerCAmelCase_ , dtype=torch.long ) * (step + 1)] , dim=1 )
| 160
|
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A ) -> tuple:
_snake_case = namedtuple('result' , 'name value' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('Only one argument must be 0' )
elif power < 0:
raise ValueError(
'Power cannot be negative in any electrical/electronics system' )
elif voltage == 0:
return result('voltage' , power / current )
elif current == 0:
return result('current' , power / voltage )
elif power == 0:
return result('power' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
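# --- Illustrative sketch (added for clarity; not part of the original file). ---
# The power triangle solved for each missing quantity in turn, using a
# hypothetical re-statement of the relation P = V * I:
def _demo_power_triangle(voltage: float , current: float , power: float ) -> float:
    if voltage == 0:
        return power / current  # missing voltage
    if current == 0:
        return power / voltage  # missing current
    return voltage * current  # missing power
assert _demo_power_triangle(0 , 2 , 4 ) == 2.0  # voltage = P / I
assert _demo_power_triangle(2 , 0 , 4 ) == 2.0  # current = P / V
assert _demo_power_triangle(2 , 3 , 0 ) == 6  # power = V * I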
| 160
| 1
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class a (_lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : Any = ["image_processor", "tokenizer"]
__UpperCAmelCase : Any = "BlipImageProcessor"
__UpperCAmelCase : Dict = ("BertTokenizer", "BertTokenizerFast")
def __init__( self : Optional[Any] , lowerCamelCase : List[str] , lowerCamelCase : Tuple ) -> str:
__snake_case : int = False
super().__init__(lowerCamelCase , lowerCamelCase )
__snake_case : int = self.image_processor
def __call__( self : int , lowerCamelCase : ImageInput = None , lowerCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCamelCase : bool = True , lowerCamelCase : Union[bool, str, PaddingStrategy] = False , lowerCamelCase : Union[bool, str, TruncationStrategy] = None , lowerCamelCase : Optional[int] = None , lowerCamelCase : int = 0 , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[bool] = None , lowerCamelCase : bool = False , lowerCamelCase : bool = False , lowerCamelCase : bool = False , lowerCamelCase : bool = False , lowerCamelCase : bool = False , lowerCamelCase : bool = True , lowerCamelCase : Optional[Union[str, TensorType]] = None , **lowerCamelCase : Dict , ) -> BatchEncoding:
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
__snake_case : List[Any] = self.tokenizer
__snake_case : Dict = self.tokenizer(
text=lowerCamelCase , add_special_tokens=lowerCamelCase , padding=lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , stride=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , return_overflowing_tokens=lowerCamelCase , return_special_tokens_mask=lowerCamelCase , return_offsets_mapping=lowerCamelCase , return_token_type_ids=lowerCamelCase , return_length=lowerCamelCase , verbose=lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase , )
return text_encoding
# add pixel_values
__snake_case : Dict = self.image_processor(lowerCamelCase , return_tensors=lowerCamelCase )
if text is not None:
__snake_case : int = self.tokenizer(
text=lowerCamelCase , add_special_tokens=lowerCamelCase , padding=lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , stride=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , return_overflowing_tokens=lowerCamelCase , return_special_tokens_mask=lowerCamelCase , return_offsets_mapping=lowerCamelCase , return_token_type_ids=lowerCamelCase , return_length=lowerCamelCase , verbose=lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase , )
else:
__snake_case : List[Any] = None
if text_encoding is not None:
encoding_image_processor.update(lowerCamelCase )
return encoding_image_processor
def __snake_case ( self : Any , *lowerCamelCase : List[Any] , **lowerCamelCase : Any ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*lowerCamelCase , **lowerCamelCase )
def __snake_case ( self : List[Any] , *lowerCamelCase : List[str] , **lowerCamelCase : int ) -> List[Any]:
return self.tokenizer.decode(*lowerCamelCase , **lowerCamelCase )
@property
def __snake_case ( self : List[str] ) -> Optional[Any]:
__snake_case : int = self.tokenizer.model_input_names
__snake_case : Any = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
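# --- Illustrative usage sketch (added for clarity; not part of the original file). ---
# Assuming the public `transformers` API and network access, a processor like the
# one above bundles the image processor and tokenizer behind a single call (the
# checkpoint name is one public example):
if __name__ == "__main__":
    from PIL import Image
    from transformers import BlipProcessor
    processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base" )
    image = Image.new("RGB" , (224, 224) )
    inputs = processor(images=image , text="a photo of" , return_tensors="pt" )
    print(sorted(inputs.keys() ) )  # e.g. ['attention_mask', 'input_ids', 'pixel_values']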
| 123
|
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
_snake_case : Any = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
    layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
)
# Step 2 - Pooling
classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation="relu"))
classifier.add(layers.Dense(units=1, activation="sigmoid"))
# Compiling the CNN
classifier.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
_snake_case : int = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
_snake_case : Optional[Any] = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
_snake_case : List[str] = train_datagen.flow_from_directory(
"dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
)
_snake_case : Any = test_datagen.flow_from_directory(
"dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save("cnn.h5")
# Part 3 - Making new predictions
_snake_case : Optional[Any] = tf.keras.preprocessing.image.load_img(
"dataset/single_prediction/image.png", target_size=(64, 64)
)
_snake_case : int = tf.keras.preprocessing.image.img_to_array(test_image)
_snake_case : Tuple = np.expand_dims(test_image, axis=0)
_snake_case : Any = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
_snake_case : Any = "Normal"
if result[0][0] == 1:
_snake_case : List[str] = "Abnormality detected"
| 123
| 1
|
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends
if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup
_snake_case : Dict = logging.get_logger(__name__)
class _UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
def __init__( self : Optional[Any] , **lowerCAmelCase_ : Tuple ) -> Union[str, Any]:
requires_backends(self , ['bs4'] )
super().__init__(**__a )
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] ) -> Any:
__lowerCAmelCase = []
__lowerCAmelCase = []
__lowerCAmelCase = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
__lowerCAmelCase = parent.find_all(child.name , recursive=__a )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(__a ) else next(i for i, s in enumerate(__a , 1 ) if s is child ) )
__lowerCAmelCase = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def lowercase ( self : Tuple , lowerCAmelCase_ : Optional[Any] ) -> Optional[int]:
__lowerCAmelCase = BeautifulSoup(__a , 'html.parser' )
__lowerCAmelCase = []
__lowerCAmelCase = []
__lowerCAmelCase = []
for element in html_code.descendants:
            if type(__a ) == bs4.element.NavigableString:
                if type(element.parent ) != bs4.element.Tag:
continue
__lowerCAmelCase = html.unescape(__a ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(__a )
__lowerCAmelCase , __lowerCAmelCase = self.xpath_soup(__a )
stringaxtag_seq.append(__a )
stringaxsubs_seq.append(__a )
if len(__a ) != len(__a ):
raise ValueError('Number of doc strings and xtags does not correspond' )
if len(__a ) != len(__a ):
raise ValueError('Number of doc strings and xsubs does not correspond' )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def lowercase ( self : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any ) -> List[Any]:
__lowerCAmelCase = ''
for tagname, subs in zip(__a , __a ):
xpath += f"""/{tagname}"""
if subs != 0:
xpath += f"""[{subs}]"""
return xpath
def __call__( self : int , lowerCAmelCase_ : Tuple ) -> Optional[Any]:
__lowerCAmelCase = False
# Check that strings has a valid type
if isinstance(__a , __a ):
__lowerCAmelCase = True
elif isinstance(__a , (list, tuple) ):
if len(__a ) == 0 or isinstance(html_strings[0] , __a ):
__lowerCAmelCase = True
if not valid_strings:
raise ValueError(
'HTML strings must of type `str`, `List[str]` (batch of examples), '
f"""but is of type {type(__a )}.""" )
__lowerCAmelCase = bool(isinstance(__a , (list, tuple) ) and (isinstance(html_strings[0] , __a )) )
if not is_batched:
__lowerCAmelCase = [html_strings]
# Get nodes + xpaths
__lowerCAmelCase = []
__lowerCAmelCase = []
for html_string in html_strings:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.get_three_from_single(__a )
nodes.append(__a )
__lowerCAmelCase = []
for node, tag_list, sub_list in zip(__a , __a , __a ):
__lowerCAmelCase = self.construct_xpath(__a , __a )
xpath_strings.append(__a )
xpaths.append(__a )
# return as Dict
__lowerCAmelCase = {'nodes': nodes, 'xpaths': xpaths}
__lowerCAmelCase = BatchFeature(data=__a , tensor_type=__a )
return encoded_inputs
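# --- Illustrative usage sketch (added for clarity; not part of the original file). ---
# Assuming the public `transformers` API (where this class is exposed as
# MarkupLMFeatureExtractor, and `bs4` is installed), the extractor turns raw HTML
# into parallel lists of text nodes and their XPaths:
if __name__ == "__main__":
    from transformers import MarkupLMFeatureExtractor
    extractor = MarkupLMFeatureExtractor()
    encoding = extractor("<html><body><p>Hello world</p></body></html>" )
    print(encoding["nodes"] )  # [['Hello world']]
    print(encoding["xpaths"] )  # [['/html/body/p']]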
| 368
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = 42
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
@register_to_config
def __init__( self : List[Any] , lowerCAmelCase_ : int = 6_5_5_3_6 , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : str = "fourier" , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCAmelCase_ : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCAmelCase_ : Tuple[str] = "UNetMidBlock1D" , lowerCAmelCase_ : str = None , lowerCAmelCase_ : Tuple[int] = (3_2, 3_2, 6_4) , lowerCAmelCase_ : str = None , lowerCAmelCase_ : int = 8 , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : bool = False , ) -> Optional[int]:
super().__init__()
__lowerCAmelCase = sample_size
# time
if time_embedding_type == "fourier":
__lowerCAmelCase = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowerCAmelCase_ , log=lowerCAmelCase_ , flip_sin_to_cos=lowerCAmelCase_ )
__lowerCAmelCase = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
__lowerCAmelCase = Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowerCAmelCase_ , downscale_freq_shift=lowerCAmelCase_ )
__lowerCAmelCase = block_out_channels[0]
if use_timestep_embedding:
__lowerCAmelCase = block_out_channels[0] * 4
__lowerCAmelCase = TimestepEmbedding(
in_channels=lowerCAmelCase_ , time_embed_dim=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , out_dim=block_out_channels[0] , )
__lowerCAmelCase = nn.ModuleList([] )
__lowerCAmelCase = None
__lowerCAmelCase = nn.ModuleList([] )
__lowerCAmelCase = None
# down
__lowerCAmelCase = in_channels
for i, down_block_type in enumerate(lowerCAmelCase_ ):
__lowerCAmelCase = output_channel
__lowerCAmelCase = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
__lowerCAmelCase = i == len(lowerCAmelCase_ ) - 1
__lowerCAmelCase = get_down_block(
lowerCAmelCase_ , num_layers=lowerCAmelCase_ , in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowerCAmelCase_ )
# mid
__lowerCAmelCase = get_mid_block(
lowerCAmelCase_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCAmelCase_ , add_downsample=lowerCAmelCase_ , )
# up
__lowerCAmelCase = list(reversed(lowerCAmelCase_ ) )
__lowerCAmelCase = reversed_block_out_channels[0]
if out_block_type is None:
__lowerCAmelCase = out_channels
else:
__lowerCAmelCase = block_out_channels[0]
for i, up_block_type in enumerate(lowerCAmelCase_ ):
__lowerCAmelCase = output_channel
__lowerCAmelCase = (
reversed_block_out_channels[i + 1] if i < len(lowerCAmelCase_ ) - 1 else final_upsample_channels
)
__lowerCAmelCase = i == len(lowerCAmelCase_ ) - 1
__lowerCAmelCase = get_up_block(
lowerCAmelCase_ , num_layers=lowerCAmelCase_ , in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowerCAmelCase_ )
__lowerCAmelCase = output_channel
# out
__lowerCAmelCase = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 3_2 )
__lowerCAmelCase = get_out_block(
out_block_type=lowerCAmelCase_ , num_groups_out=lowerCAmelCase_ , embed_dim=block_out_channels[0] , out_channels=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , fc_dim=block_out_channels[-1] // 4 , )
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : Union[torch.Tensor, float, int] , lowerCAmelCase_ : bool = True , ) -> Union[UNetaDOutput, Tuple]:
__lowerCAmelCase = timestep
if not torch.is_tensor(lowerCAmelCase_ ):
__lowerCAmelCase = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(lowerCAmelCase_ ) and len(timesteps.shape ) == 0:
__lowerCAmelCase = timesteps[None].to(sample.device )
__lowerCAmelCase = self.time_proj(lowerCAmelCase_ )
if self.config.use_timestep_embedding:
__lowerCAmelCase = self.time_mlp(lowerCAmelCase_ )
else:
__lowerCAmelCase = timestep_embed[..., None]
__lowerCAmelCase = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
__lowerCAmelCase = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
__lowerCAmelCase = ()
for downsample_block in self.down_blocks:
__lowerCAmelCase , __lowerCAmelCase = downsample_block(hidden_states=lowerCAmelCase_ , temb=lowerCAmelCase_ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
__lowerCAmelCase = self.mid_block(lowerCAmelCase_ , lowerCAmelCase_ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
__lowerCAmelCase = down_block_res_samples[-1:]
__lowerCAmelCase = down_block_res_samples[:-1]
__lowerCAmelCase = upsample_block(lowerCAmelCase_ , res_hidden_states_tuple=lowerCAmelCase_ , temb=lowerCAmelCase_ )
# 5. post-process
if self.out_block:
__lowerCAmelCase = self.out_block(lowerCAmelCase_ , lowerCAmelCase_ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=lowerCAmelCase_ )
| 207
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
a_ : Union[str, Any] = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[int] = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[str] = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Dict = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
a_ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
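# --- Illustrative sketch (added for clarity; not part of the original file). ---
# The pattern above defers heavy imports until an attribute is first accessed.
# A minimal stand-alone version of the same idea using PEP 562 module-level
# __getattr__; the attribute-to-submodule mapping shown is hypothetical, and the
# relative import only resolves when this code lives inside a package:
import importlib
_DEMO_LAZY_ATTRS = {"EncoderDecoderConfig": ".configuration_encoder_decoder"}
def __getattr__(name ):
    if name in _DEMO_LAZY_ATTRS:
        module = importlib.import_module(_DEMO_LAZY_ATTRS[name] , __name__ )
        return getattr(module , name )
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}" )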
| 137
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class _snake_case ( unittest.TestCase ):
def __init__( self , a , a=7 , a=3 , a=10 , a=18 , a=30 , a=400 , a=True , a=None , a=True , a=[0.5, 0.5, 0.5] , a=[0.5, 0.5, 0.5] , a=None , ) -> Dict:
SCREAMING_SNAKE_CASE = size if size is not None else {'shortest_edge': 18}
SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {'height': 18, 'width': 18}
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = num_frames
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = min_resolution
SCREAMING_SNAKE_CASE = max_resolution
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = do_normalize
SCREAMING_SNAKE_CASE = image_mean
SCREAMING_SNAKE_CASE = image_std
SCREAMING_SNAKE_CASE = crop_size
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 137
| 1
|
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
A_ : Optional[int] = True
except (ImportError, ModuleNotFoundError):
A_ : List[str] = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split the text into sentences and join them with newlines (used for ROUGE-Lsum)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char; keep the substitution result
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 355
|
"""simple docstring"""
import cva
import numpy as np
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if k in (0.04, 0.06):
lowerCamelCase__ : Tuple = k
lowerCamelCase__ : Optional[Any] = window_size
else:
raise ValueError('invalid k value' )
def __str__(self ):
'''simple docstring'''
return str(self.k )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = cva.imread(lowerCamelCase_, 0 )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = img.shape
lowerCamelCase__ : list[list[int]] = []
lowerCamelCase__ : Optional[Any] = img.copy()
lowerCamelCase__ : Optional[Any] = cva.cvtColor(lowerCamelCase_, cva.COLOR_GRAY2RGB )
lowerCamelCase__ , lowerCamelCase__ : Any = np.gradient(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = dx**2
lowerCamelCase__ : List[Any] = dy**2
lowerCamelCase__ : List[str] = dx * dy
lowerCamelCase__ : Tuple = 0.04
lowerCamelCase__ : List[Any] = self.window_size // 2
for y in range(lowerCamelCase_, h - offset ):
for x in range(lowerCamelCase_, w - offset ):
lowerCamelCase__ : Union[str, Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : Optional[Any] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : List[Any] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : str = (wxx * wyy) - (wxy**2)
lowerCamelCase__ : Dict = wxx + wyy
lowerCamelCase__ : Union[str, Any] = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0), 0 )
color_img.itemset((y, x, 1), 0 )
color_img.itemset((y, x, 2), 2_5_5 )
return color_img, corner_list
if __name__ == "__main__":
A_ : Optional[Any] = HarrisCorner(0.04, 3)
A_, A_ : List[Any] = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 316
| 0
|
def kth_permutation(k: int, n: int) -> list[int]:
    """Return the k-th (0-indexed) lexicographic permutation of [0, 1, ..., n - 1]."""
    # Factorials up to (n - 1)! let us decode k in the factorial number system.
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])
    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
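# Worked example (illustrative): kth_permutation(4, 3).
# factorials = [1, 2]; divmod(4, 2) = (2, 0) -> pick elements[2] = 2,
# then divmod(0, 1) = (0, 0) -> pick 0, and the last remaining element is 1.
# print(kth_permutation(4, 3))  # [2, 0, 1]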
| 208
|
from __future__ import annotations

import math
from collections.abc import Callable


def line_length(fnc: Callable[[float], float], x_start: float, x_end: float, steps: int = 100) -> float:
    """Approximate the arc length of fnc between x_start and x_end using `steps` chords."""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
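# Sanity check (illustrative): a straight line is reproduced exactly for any step
# count, since every chord lies on the curve. For f(x) = x on [0, 1]:
# print(line_length(lambda x: x, 0, 1, 10))  # ~1.4142135... == sqrt(2)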
| 208
| 1
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images to pass to the image processor."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 167
|
def hexagonal_numbers(length: int) -> list[int]:
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
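# The n-th hexagonal number is h_n = n * (2 * n - 1); indexing from n = 0, the list
# above starts 0, 1, 6, 15, 28, ...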
| 167
| 1
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32,
                 num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
                 type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
                 num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )
                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 117
|
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    """Yield Fibonacci numbers starting from 1: 1, 2, 3, 5, 8, ..."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain n digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
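# Quick check (illustrative): the first Fibonacci term with 3 digits is 144 = F(12),
# so solution(3) returns 12.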
| 329
| 0
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak a timm DeiT checkpoint into the HuggingFace DeiT structure."""
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 120
|
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
'openmmlab/upernet-convnext-tiny',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    def __init__(self, config, in_channels):
        super().__init__()

        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output
class UperNetFCNHead(nn.Module):
    def __init__(self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1) -> None:
        super().__init__()

        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = r"""
    Parameters:
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.
    config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
UPERNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
| 120
| 1
|
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
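# Hypothetical usage sketch (checkpoint id shown for illustration only):
# pipeline = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
# images = pipeline(batch_size=1, num_inference_steps=50).images
# images[0].save("sample.png")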
| 315
|
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=32000, d_model=1024, n_layer=24, n_head=16, d_inner=4096,
                 ff_activation="gelu", untie_r=True, attn_type="bi", initializer_range=0.02,
                 layer_norm_eps=1e-12, dropout=0.1, mem_len=512, reuse_len=None, use_mems_eval=True,
                 use_mems_train=False, bi_data=False, clamp_len=-1, same_length=False,
                 summary_type="last", summary_use_proj=True, summary_activation="tanh",
                 summary_last_dropout=0.1, start_n_top=5, end_n_top=5, pad_token_id=5,
                 bos_token_id=1, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
@property
def lowerCAmelCase_ ( self : Tuple ):
logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Optional[Any] ):
# Message copied from Transformer-XL documentation
raise NotImplementedError(
F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
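# Short sketch of how the attribute_map above exposes both naming schemes on
# one config object (assumes the public transformers.XLNetConfig):
from transformers import XLNetConfig
config = XLNetConfig(d_model=1024, n_head=16)  # d_model % n_head must be 0
assert config.hidden_size == config.d_model    # aliased through attribute_map
assert config.d_head == config.d_model // config.n_head
assert config.max_position_embeddings == -1    # XLNet has no fixed length limit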
| 315
| 1
|
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
UpperCamelCase = threading.Lock()
UpperCamelCase = None
UpperCamelCase = {
'''debug''': logging.DEBUG,
'''info''': logging.INFO,
'''warning''': logging.WARNING,
'''error''': logging.ERROR,
'''critical''': logging.CRITICAL,
}
UpperCamelCase = logging.WARNING
UpperCamelCase = True
def __lowerCamelCase ( ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = os.getenv("""TRANSFORMERS_VERBOSITY""" ,snake_case__ )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F'Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '
F'has to be one of: { ", ".join(log_levels.keys() ) }' )
return _default_log_level
def __lowerCamelCase ( ) -> str:
"""simple docstring"""
return __name__.split(""".""" )[0]
def __lowerCamelCase ( ) -> logging.Logger:
"""simple docstring"""
return logging.getLogger(_get_library_name() )
def __lowerCamelCase ( ) -> None:
"""simple docstring"""
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
_SCREAMING_SNAKE_CASE = logging.StreamHandler() # Set sys.stderr as stream.
_SCREAMING_SNAKE_CASE = sys.stderr.flush
# Apply our default configuration to the library root logger.
_SCREAMING_SNAKE_CASE = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
_SCREAMING_SNAKE_CASE = False
def __lowerCamelCase ( ) -> None:
"""simple docstring"""
global _default_handler
with _lock:
if not _default_handler:
return
_SCREAMING_SNAKE_CASE = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
_SCREAMING_SNAKE_CASE = None
def __lowerCamelCase ( ) -> Optional[int]:
"""simple docstring"""
return log_levels
def __lowerCamelCase ( snake_case__ = None ) -> logging.Logger:
"""simple docstring"""
if name is None:
_SCREAMING_SNAKE_CASE = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(snake_case__ )
def __lowerCamelCase ( ) -> int:
"""simple docstring"""
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def __lowerCamelCase ( snake_case__ ) -> None:
"""simple docstring"""
_configure_library_root_logger()
_get_library_root_logger().setLevel(snake_case__ )
def __lowerCamelCase ( ) -> str:
"""simple docstring"""
return set_verbosity(snake_case__ )
def __lowerCamelCase ( ) -> Dict:
"""simple docstring"""
return set_verbosity(snake_case__ )
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
return set_verbosity(snake_case__ )
def __lowerCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
return set_verbosity(snake_case__ )
def __lowerCamelCase ( ) -> None:
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def __lowerCamelCase ( ) -> None:
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def __lowerCamelCase ( snake_case__ ) -> None:
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(snake_case__ )
def __lowerCamelCase ( snake_case__ ) -> None:
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(snake_case__ )
def __lowerCamelCase ( ) -> None:
"""simple docstring"""
_configure_library_root_logger()
_SCREAMING_SNAKE_CASE = False
def __lowerCamelCase ( ) -> None:
"""simple docstring"""
_configure_library_root_logger()
_SCREAMING_SNAKE_CASE = True
def __lowerCamelCase ( ) -> None:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = _get_library_root_logger().handlers
for handler in handlers:
_SCREAMING_SNAKE_CASE = logging.Formatter("""[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s""" )
handler.setFormatter(snake_case__ )
def __lowerCamelCase ( ) -> None:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(snake_case__ )
def __lowerCamelCase ( self ,*snake_case__ ,**snake_case__ ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = os.getenv("""TRANSFORMERS_NO_ADVISORY_WARNINGS""" ,snake_case__ )
if no_advisory_warnings:
return
self.warning(*snake_case__ ,**snake_case__ )
UpperCamelCase = warning_advice
@functools.lru_cache(snake_case__ )
def __lowerCamelCase ( self ,*snake_case__ ,**snake_case__ ) -> str:
"""simple docstring"""
self.warning(*snake_case__ ,**snake_case__ )
UpperCamelCase = warning_once
class __UpperCAmelCase :
def __init__( self: Optional[int] , *UpperCAmelCase_: Tuple , **UpperCAmelCase_: int ): # pylint: disable=unused-argument
'''simple docstring'''
_SCREAMING_SNAKE_CASE = args[0] if args else None
def __iter__( self: Any ):
'''simple docstring'''
return iter(self._iterator )
def __getattr__( self: List[Any] , UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
def empty_fn(*UpperCAmelCase_: Tuple , **UpperCAmelCase_: str ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self: str ):
'''simple docstring'''
return self
def __exit__( self: Optional[int] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: str , UpperCAmelCase_: Optional[Any] ):
'''simple docstring'''
return
class __UpperCAmelCase :
def __call__( self: Tuple , *UpperCAmelCase_: List[Any] , **UpperCAmelCase_: int ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm(*UpperCAmelCase_ , **UpperCAmelCase_ )
else:
return EmptyTqdm(*UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Union[str, Any] , *UpperCAmelCase_: Union[str, Any] , **UpperCAmelCase_: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
UpperCamelCase = _tqdm_cls()
def __lowerCamelCase ( ) -> bool:
"""simple docstring"""
global _tqdm_active
return bool(_tqdm_active )
def __lowerCamelCase ( ) -> Optional[int]:
"""simple docstring"""
global _tqdm_active
_SCREAMING_SNAKE_CASE = True
hf_hub_utils.enable_progress_bars()
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
global _tqdm_active
_SCREAMING_SNAKE_CASE = False
hf_hub_utils.disable_progress_bars()
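# Usage sketch for the verbosity and progress-bar helpers defined above.
# Assumption: this module is the obfuscated transformers.utils.logging, so the
# public names below match the functions whose identifiers were mangled here.
import transformers.utils.logging as hf_logging
hf_logging.set_verbosity_info()           # root library logger -> INFO
logger = hf_logging.get_logger(__name__)  # child of the library root logger
logger.info('tqdm bars enabled: %s', hf_logging.is_progress_bar_enabled())
hf_logging.disable_progress_bar()         # flips the module-level _tqdm_active flag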
| 125
|
def __lowerCamelCase ( snake_case__ ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = len(snake_case__ )
for i in range(length - 1 ):
_SCREAMING_SNAKE_CASE = i
for k in range(i + 1 ,snake_case__ ):
if collection[k] < collection[least]:
_SCREAMING_SNAKE_CASE = k
if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
return collection
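# Quick sanity checks for the selection sort above. `selection_sort` is the
# name used at the call site in the __main__ block below; the def line's
# identifier was mangled in this dump, so treat the name as an assumption.
assert selection_sort([5, 2, 9, 1]) == [1, 2, 5, 9]
assert selection_sort([-1, 0, -3]) == [-3, -1, 0]
assert selection_sort([]) == []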
if __name__ == "__main__":
UpperCamelCase = input('''Enter numbers separated by a comma:\n''').strip()
UpperCamelCase = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted))
| 125
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
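# Sketch of what the _LazyModule indirection above buys: heavy submodules are
# imported only on first attribute access. Assumes this file is the obfuscated
# transformers.models.altclip __init__ (module path hedged accordingly).
import importlib
altclip = importlib.import_module('transformers.models.altclip')
processor_cls = altclip.AltCLIPProcessor  # the lazy import happens here, not above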
| 178
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
lowercase = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
"GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXForCausalLM",
"GPTNeoXForQuestionAnswering",
"GPTNeoXForSequenceClassification",
"GPTNeoXForTokenClassification",
"GPTNeoXLayer",
"GPTNeoXModel",
"GPTNeoXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 178
| 1
|
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase: Union[str, Any] = logging.getLogger(__name__)
lowerCAmelCase: Optional[Any] = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
lowerCAmelCase: Optional[int] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class a__:
lowercase__ = field(
default=lowerCamelCase__ , metadata={
"""help""": (
"""The model checkpoint for weights initialization. Leave None if you want to train a model from"""
""" scratch."""
)
} , )
lowercase__ = field(
default=lowerCamelCase__ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(lowerCamelCase__ )} , )
lowercase__ = field(
default=lowerCamelCase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowercase__ = field(
default=lowerCamelCase__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
lowercase__ = field(
default=lowerCamelCase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class a__:
lowercase__ = field(
default=lowerCamelCase__ , metadata={"""help""": """The input training data file (a text file)."""} )
lowercase__ = field(
default=lowerCamelCase__ , metadata={
"""help""": (
"""The input training data files (multiple files in glob format). """
"""Very often splitting large files to smaller files can prevent tokenizer going out of memory"""
)
} , )
lowercase__ = field(
default=lowerCamelCase__ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
lowercase__ = field(
default=lowerCamelCase__ , metadata={"""help""": """An optional input train ref data file for whole word mask in Chinese."""} , )
lowercase__ = field(
default=lowerCamelCase__ , metadata={"""help""": """An optional input eval ref data file for whole word mask in Chinese."""} , )
lowercase__ = field(
default=lowerCamelCase__ , metadata={"""help""": """Whether distinct lines of text in the dataset are to be handled as distinct sequences."""} , )
lowercase__ = field(
default=lowerCamelCase__ , metadata={"""help""": """Train with masked-language modeling loss instead of language modeling."""} )
lowercase__ = field(default=lowerCamelCase__ , metadata={"""help""": """Whether ot not to use whole word mask."""} )
lowercase__ = field(
default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
lowercase__ = field(
default=1 / 6 , metadata={
"""help""": (
"""Ratio of length of a span of masked tokens to surrounding context length for permutation language"""
""" modeling."""
)
} , )
lowercase__ = field(
default=5 , metadata={"""help""": """Maximum length of a span of masked tokens for permutation language modeling."""} )
lowercase__ = field(
default=-1 , metadata={
"""help""": (
"""Optional input sequence length after tokenization."""
"""The training dataset will be truncated in block of this size for training."""
"""Default to the model max input length for single sentence inputs (take into account special tokens)."""
)
} , )
lowercase__ = field(
default=lowerCamelCase__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def lowerCamelCase__ ( _A , _A , _A = False , _A = None , ):
def _dataset(_A , _A=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
                    raise ValueError('You need to set whole word masking and mlm to True for Chinese Whole Word Mask' )
return LineByLineWithRefDataset(
tokenizer=_A , file_path=_A , block_size=args.block_size , ref_path=_A , )
return LineByLineTextDataset(tokenizer=_A , file_path=_A , block_size=args.block_size )
else:
return TextDataset(
tokenizer=_A , file_path=_A , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=_A , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(_A ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def lowerCamelCase__ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
a : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
a , a , a : Tuple = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
'or remove the --do_eval argument.' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _A )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
a : Optional[Any] = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
a : List[Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
a : Optional[int] = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.tokenizer_name:
a : List[str] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
a : Optional[int] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
            ' script, save it, and load it from here, using --tokenizer_name' )
if model_args.model_name_or_path:
a : List[Any] = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_A , cache_dir=model_args.cache_dir , )
else:
logger.info('Training new model from scratch' )
a : List[str] = AutoModelWithLMHead.from_config(_A )
model.resize_token_embeddings(len(_A ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
'--mlm flag (masked language modeling).' )
if data_args.block_size <= 0:
a : Optional[Any] = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
a : Any = min(data_args.block_size , tokenizer.max_len )
# Get datasets
a : Union[str, Any] = (
get_dataset(_A , tokenizer=_A , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
a : str = (
get_dataset(_A , tokenizer=_A , evaluate=_A , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
a : int = DataCollatorForPermutationLanguageModeling(
tokenizer=_A , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
a : int = DataCollatorForWholeWordMask(
tokenizer=_A , mlm_probability=data_args.mlm_probability )
else:
a : str = DataCollatorForLanguageModeling(
tokenizer=_A , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
a : Optional[Any] = Trainer(
model=_A , args=_A , data_collator=_A , train_dataset=_A , eval_dataset=_A , prediction_loss_only=_A , )
# Training
if training_args.do_train:
a : Optional[int] = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=_A )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
a : Any = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
a : List[str] = trainer.evaluate()
a : Any = math.exp(eval_output['eval_loss'] )
a : Any = {'perplexity': perplexity}
a : Union[str, Any] = os.path.join(training_args.output_dir , 'eval_results_lm.txt' )
if trainer.is_world_master():
with open(_A , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , _A , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
results.update(_A )
return results
def lowerCamelCase__ ( _A ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
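# Hedged sketch of driving this script's argument stack from Python. The two
# dataclasses above had their names mangled (upstream they are ModelArguments
# and DataTrainingArguments); field names below are recovered from how the
# script reads them (model_args.model_name_or_path, data_args.train_data_file,
# data_args.mlm), and the paths are placeholders.
import sys
from transformers import HfArgumentParser, TrainingArguments
sys.argv = [
    'run_language_modeling.py',
    '--model_name_or_path', 'distilroberta-base',
    '--train_data_file', 'train.txt',
    '--mlm',
    '--do_train',
    '--output_dir', './out',
]
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()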
| 96
|
'''simple docstring'''
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class a__( lowerCamelCase__ , unittest.TestCase ):
lowercase__ = BertJapaneseTokenizer
lowercase__ = False
lowercase__ = True
def lowercase_ ( self : int ):
super().setUp()
a : List[Any] = [
'[UNK]',
'[CLS]',
'[SEP]',
'こんにちは',
'こん',
'にちは',
'ばんは',
'##こん',
'##にちは',
'##ばんは',
'世界',
'##世界',
'、',
'##、',
'。',
'##。',
]
a : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowercase_ ( self : Any , __snake_case : str ):
a : Union[str, Any] = 'こんにちは、世界。 \nこんばんは、世界。'
a : List[Any] = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
return input_text, output_text
def lowercase_ ( self : Optional[Any] , __snake_case : Optional[Any] ):
a , a : List[str] = self.get_input_output_texts(__snake_case )
a : Optional[int] = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
a : str = tokenizer.decode(__snake_case , clean_up_tokenization_spaces=__snake_case )
return text, ids
def lowercase_ ( self : Optional[Any] ):
pass # TODO add if relevant
def lowercase_ ( self : List[Any] ):
pass # TODO add if relevant
def lowercase_ ( self : Dict ):
pass # TODO add if relevant
def lowercase_ ( self : List[Any] ):
a : Optional[int] = self.tokenizer_class(self.vocab_file )
a : Optional[int] = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。' )
self.assertListEqual(__snake_case , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def lowercase_ ( self : Union[str, Any] ):
a : Tuple = self.tokenizer_class(self.vocab_file , word_tokenizer_type='mecab' )
self.assertIsNotNone(__snake_case )
a : List[str] = 'こんにちは、世界。\nこんばんは、世界。'
a : Tuple = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
a : Optional[int] = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(__snake_case , 'wb' ) as handle:
pickle.dump(__snake_case , __snake_case )
with open(__snake_case , 'rb' ) as handle:
a : Optional[Any] = pickle.load(__snake_case )
a : Tuple = tokenizer_new.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
def lowercase_ ( self : Dict ):
a : List[str] = MecabTokenizer(mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase_ ( self : List[Any] ):
try:
a : int = MecabTokenizer(mecab_dic='unidic_lite' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase_ ( self : Any ):
try:
a : Union[str, Any] = MecabTokenizer(mecab_dic='unidic' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase_ ( self : str ):
a : Tuple = MecabTokenizer(do_lower_case=__snake_case , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase_ ( self : Union[str, Any] ):
try:
a : Any = MecabTokenizer(
do_lower_case=__snake_case , normalize_text=__snake_case , mecab_option='-d /usr/local/lib/mecab/dic/jumandic' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , )
def lowercase_ ( self : List[Any] ):
a : Dict = MecabTokenizer(normalize_text=__snake_case , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , )
@require_sudachi
def lowercase_ ( self : str ):
a : Optional[int] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='sudachi' )
self.assertIsNotNone(__snake_case )
a : List[Any] = 'こんにちは、世界。\nこんばんは、世界。'
a : int = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
a : Tuple = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(__snake_case , 'wb' ) as handle:
pickle.dump(__snake_case , __snake_case )
with open(__snake_case , 'rb' ) as handle:
a : Optional[int] = pickle.load(__snake_case )
a : List[Any] = tokenizer_new.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
@require_sudachi
def lowercase_ ( self : List[Any] ):
a : Optional[Any] = SudachiTokenizer(sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def lowercase_ ( self : Any ):
a : str = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='A' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国', '人', '参政', '権'] )
@require_sudachi
def lowercase_ ( self : Optional[Any] ):
a : Optional[int] = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='B' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人', '参政権'] )
@require_sudachi
def lowercase_ ( self : Optional[Any] ):
a : Dict = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='C' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人参政権'] )
@require_sudachi
def lowercase_ ( self : Dict ):
a : Optional[int] = SudachiTokenizer(do_lower_case=__snake_case , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def lowercase_ ( self : Tuple ):
a : int = SudachiTokenizer(normalize_text=__snake_case , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , )
@require_sudachi
def lowercase_ ( self : Union[str, Any] ):
a : List[str] = SudachiTokenizer(trim_whitespace=__snake_case , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
@require_jumanpp
def lowercase_ ( self : List[Any] ):
a : Optional[int] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='jumanpp' )
self.assertIsNotNone(__snake_case )
a : str = 'こんにちは、世界。\nこんばんは、世界。'
a : Tuple = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
a : Optional[Any] = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(__snake_case , 'wb' ) as handle:
pickle.dump(__snake_case , __snake_case )
with open(__snake_case , 'rb' ) as handle:
a : List[str] = pickle.load(__snake_case )
a : Any = tokenizer_new.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
@require_jumanpp
def lowercase_ ( self : List[str] ):
a : Any = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase_ ( self : List[str] ):
a : List[Any] = JumanppTokenizer(do_lower_case=__snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase_ ( self : Any ):
a : List[Any] = JumanppTokenizer(normalize_text=__snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase_ ( self : Any ):
a : str = JumanppTokenizer(trim_whitespace=__snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , )
@require_jumanpp
def lowercase_ ( self : Tuple ):
a : int = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ) , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , )
def lowercase_ ( self : Any ):
a : int = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']
a : Optional[int] = {}
for i, token in enumerate(__snake_case ):
a : Dict = i
a : Optional[Any] = WordpieceTokenizer(vocab=__snake_case , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こんにちは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは' ) , ['こん', '##ばんは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ) , ['こん', '##ばんは', '[UNK]', 'こんにちは'] )
def lowercase_ ( self : Tuple ):
a : List[Any] = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp' )
a : List[Any] = tokenizer.subword_tokenizer
a : List[str] = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。' )
self.assertListEqual(__snake_case , ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'] )
a : Union[str, Any] = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは' )
self.assertListEqual(__snake_case , ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'] )
def lowercase_ ( self : Union[str, Any] ):
a : Optional[Any] = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese' )
a : Dict = tokenizer.encode('ありがとう。' , add_special_tokens=__snake_case )
a : str = tokenizer.encode('どういたしまして。' , add_special_tokens=__snake_case )
a : Optional[int] = tokenizer.build_inputs_with_special_tokens(__snake_case )
a : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class a__( lowerCamelCase__ , unittest.TestCase ):
lowercase__ = BertJapaneseTokenizer
lowercase__ = False
def lowercase_ ( self : List[Any] ):
super().setUp()
a : List[Any] = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
a : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowercase_ ( self : Optional[Any] , **__snake_case : List[Any] ):
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='character' , **__snake_case )
def lowercase_ ( self : Tuple , __snake_case : List[str] ):
a : int = 'こんにちは、世界。 \nこんばんは、世界。'
a : Optional[Any] = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
return input_text, output_text
def lowercase_ ( self : str ):
pass # TODO add if relevant
def lowercase_ ( self : List[str] ):
pass # TODO add if relevant
def lowercase_ ( self : Any ):
pass # TODO add if relevant
def lowercase_ ( self : Any ):
a : Optional[int] = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='character' )
a : Tuple = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。' )
self.assertListEqual(
__snake_case , ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def lowercase_ ( self : Any ):
a : Union[str, Any] = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
a : Optional[Any] = {}
for i, token in enumerate(__snake_case ):
a : Tuple = i
a : Optional[int] = CharacterTokenizer(vocab=__snake_case , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こ', 'ん', 'に', 'ち', 'は'] )
self.assertListEqual(tokenizer.tokenize('こんにちほ' ) , ['こ', 'ん', 'に', 'ち', '[UNK]'] )
def lowercase_ ( self : Tuple ):
a : List[Any] = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char' )
a : Optional[int] = tokenizer.encode('ありがとう。' , add_special_tokens=__snake_case )
a : List[str] = tokenizer.encode('どういたしまして。' , add_special_tokens=__snake_case )
a : Optional[int] = tokenizer.build_inputs_with_special_tokens(__snake_case )
a : Dict = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class a__( unittest.TestCase ):
def lowercase_ ( self : List[str] ):
a : List[Any] = 'cl-tohoku/bert-base-japanese'
a : Dict = AutoTokenizer.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
class a__( unittest.TestCase ):
def lowercase_ ( self : Union[str, Any] ):
a : List[str] = 'cl-tohoku/bert-base-japanese'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertTokenizer.from_pretrained(__snake_case )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
a : Dict = 'bert-base-cased'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertJapaneseTokenizer.from_pretrained(__snake_case )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
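# Minimal sketch of the greedy longest-match behaviour the WordpieceTokenizer
# tests above exercise (vocab entries and expected pieces are taken from those
# tests; the import path is the one used at the top of this file):
from transformers.models.bert_japanese.tokenization_bert_japanese import WordpieceTokenizer
vocab = {token: i for i, token in enumerate(['[UNK]', 'こんにちは', 'こん', '##ばんは'])}
tokenizer = WordpieceTokenizer(vocab=vocab, unk_token='[UNK]')
print(tokenizer.tokenize('こんばんは'))  # ['こん', '##ばんは']
print(tokenizer.tokenize('こんにちは'))  # ['こんにちは']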
| 96
| 1
|
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : Union[str, Any] = (DEISMultistepScheduler,)
__lowerCamelCase : Union[str, Any] = (('''num_inference_steps''', 25),)
def _snake_case ( self , **_lowerCAmelCase ) -> int:
_lowerCAmelCase = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
}
config.update(**__lowerCAmelCase )
return config
def _snake_case ( self , _lowerCAmelCase=0 , **_lowerCAmelCase ) -> int:
_lowerCAmelCase = dict(self.forward_default_kwargs )
_lowerCAmelCase = kwargs.pop("num_inference_steps" , __lowerCAmelCase )
_lowerCAmelCase = self.dummy_sample
_lowerCAmelCase = 0.1 * sample
_lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase = self.get_scheduler_config(**__lowerCAmelCase )
_lowerCAmelCase = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(__lowerCAmelCase )
# copy over dummy past residuals
_lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCAmelCase )
_lowerCAmelCase = scheduler_class.from_pretrained(__lowerCAmelCase )
new_scheduler.set_timesteps(__lowerCAmelCase )
# copy over dummy past residuals
_lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase , _lowerCAmelCase = sample, sample
for t in range(__lowerCAmelCase , time_step + scheduler.config.solver_order + 1 ):
_lowerCAmelCase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
_lowerCAmelCase = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _snake_case ( self ) -> List[Any]:
pass
def _snake_case ( self , _lowerCAmelCase=0 , **_lowerCAmelCase ) -> Dict:
_lowerCAmelCase = dict(self.forward_default_kwargs )
_lowerCAmelCase = kwargs.pop("num_inference_steps" , __lowerCAmelCase )
_lowerCAmelCase = self.dummy_sample
_lowerCAmelCase = 0.1 * sample
_lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(__lowerCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCAmelCase )
_lowerCAmelCase = scheduler_class.from_pretrained(__lowerCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__lowerCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
_lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
_lowerCAmelCase = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _snake_case ( self , _lowerCAmelCase=None , **_lowerCAmelCase ) -> Any:
if scheduler is None:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(**__lowerCAmelCase )
_lowerCAmelCase = scheduler_class(**__lowerCAmelCase )
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(**__lowerCAmelCase )
_lowerCAmelCase = scheduler_class(**__lowerCAmelCase )
_lowerCAmelCase = 10
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase )
_lowerCAmelCase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample
return sample
def _snake_case ( self ) -> Any:
_lowerCAmelCase = dict(self.forward_default_kwargs )
_lowerCAmelCase = kwargs.pop("num_inference_steps" , __lowerCAmelCase )
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**__lowerCAmelCase )
_lowerCAmelCase = self.dummy_sample
_lowerCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(__lowerCAmelCase , "set_timesteps" ):
scheduler.set_timesteps(__lowerCAmelCase )
elif num_inference_steps is not None and not hasattr(__lowerCAmelCase , "set_timesteps" ):
_lowerCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
_lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
_lowerCAmelCase = scheduler.timesteps[5]
_lowerCAmelCase = scheduler.timesteps[6]
_lowerCAmelCase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
_lowerCAmelCase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _snake_case ( self ) -> Tuple:
_lowerCAmelCase = DEISMultistepScheduler(**self.get_scheduler_config() )
_lowerCAmelCase = self.full_loop(scheduler=__lowerCAmelCase )
_lowerCAmelCase = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_mean.item() - 0.23916 ) < 1E-3
_lowerCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_lowerCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase = self.full_loop(scheduler=__lowerCAmelCase )
_lowerCAmelCase = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_mean.item() - 0.23916 ) < 1E-3
def _snake_case ( self ) -> List[Any]:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=__lowerCAmelCase )
def _snake_case ( self ) -> int:
self.check_over_configs(thresholding=__lowerCAmelCase )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__lowerCAmelCase , prediction_type=__lowerCAmelCase , sample_max_value=__lowerCAmelCase , algorithm_type="deis" , solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , )
def _snake_case ( self ) -> Optional[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCAmelCase )
def _snake_case ( self ) -> Any:
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , prediction_type=__lowerCAmelCase , algorithm_type=__lowerCAmelCase , )
_lowerCAmelCase = self.full_loop(
solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , prediction_type=__lowerCAmelCase , algorithm_type=__lowerCAmelCase , )
assert not torch.isnan(__lowerCAmelCase ).any(), "Samples have nan numbers"
def _snake_case ( self ) -> Any:
self.check_over_configs(lower_order_final=__lowerCAmelCase )
self.check_over_configs(lower_order_final=__lowerCAmelCase )
def _snake_case ( self ) -> Union[str, Any]:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=__lowerCAmelCase , time_step=0 )
def _snake_case ( self ) -> Optional[Any]:
_lowerCAmelCase = self.full_loop()
_lowerCAmelCase = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_mean.item() - 0.23916 ) < 1E-3
def _snake_case ( self ) -> Dict:
_lowerCAmelCase = self.full_loop(prediction_type="v_prediction" )
_lowerCAmelCase = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_mean.item() - 0.091 ) < 1E-3
def _snake_case ( self ) -> int:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(thresholding=__lowerCAmelCase , dynamic_thresholding_ratio=0 )
_lowerCAmelCase = scheduler_class(**__lowerCAmelCase )
_lowerCAmelCase = 10
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(__lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase )
_lowerCAmelCase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample
assert sample.dtype == torch.floataa
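# Sketch of the config round-trip that the multi-scheduler test above relies
# on: each multistep scheduler can be rebuilt from another one's config
# (standard diffusers SchedulerMixin.from_config API).
from diffusers import DEISMultistepScheduler, UniPCMultistepScheduler
deis = DEISMultistepScheduler(num_train_timesteps=1000, solver_order=2)
unipc = UniPCMultistepScheduler.from_config(deis.config)
deis_again = DEISMultistepScheduler.from_config(unipc.config)
assert deis_again.config.solver_order == 2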
| 158
|
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
A : str = 0
A : Any = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
A : Dict = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
A : Union[str, Any] = tuple[int, int]
class A :
'''simple docstring'''
def __init__( self : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Node | None , ) -> None:
"""simple docstring"""
A__ = pos_x
A__ = pos_y
A__ = (pos_y, pos_x)
A__ = goal_x
A__ = goal_y
A__ = g_cost
A__ = parent
A__ = self.calculate_heuristic()
A__ = self.g_cost + self.h_cost
def a_ ( self : Dict ) -> float:
"""simple docstring"""
A__ = self.pos_x - self.goal_x
A__ = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(__lowerCAmelCase ) + abs(__lowerCAmelCase )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self : int , __lowerCAmelCase : Node ) -> bool:
"""simple docstring"""
return self.f_cost < other.f_cost
class A :
'''simple docstring'''
def __init__( self : Union[str, Any] , __lowerCAmelCase : TPosition , __lowerCAmelCase : TPosition ) -> Tuple:
"""simple docstring"""
A__ = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , __lowerCAmelCase )
A__ = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , __lowerCAmelCase )
A__ = [self.start]
A__ = []
A__ = False
def a_ ( self : List[str] ) -> list[TPosition]:
"""simple docstring"""
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
A__ = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(__lowerCAmelCase )
self.closed_nodes.append(__lowerCAmelCase )
A__ = self.get_successors(__lowerCAmelCase )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(__lowerCAmelCase )
else:
# retrieve the best current path
A__ = self.open_nodes.pop(self.open_nodes.index(__lowerCAmelCase ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(__lowerCAmelCase )
else:
self.open_nodes.append(__lowerCAmelCase )
return [self.start.pos]
def a_ ( self : Optional[Any] , __lowerCAmelCase : Node ) -> list[Node]:
"""simple docstring"""
A__ = []
for action in delta:
A__ = parent.pos_x + action[1]
A__ = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__lowerCAmelCase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
__lowerCAmelCase , __lowerCAmelCase , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , __lowerCAmelCase , ) )
return successors
def a_ ( self : List[Any] , __lowerCAmelCase : Node | None ) -> list[TPosition]:
"""simple docstring"""
A__ = node
A__ = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
A__ = current_node.parent
path.reverse()
return path
class A :
'''simple docstring'''
def __init__( self : Optional[Any] , __lowerCAmelCase : TPosition , __lowerCAmelCase : TPosition ) -> None:
"""simple docstring"""
A__ = AStar(__lowerCAmelCase , __lowerCAmelCase )
A__ = AStar(__lowerCAmelCase , __lowerCAmelCase )
A__ = False
def a_ ( self : int ) -> list[TPosition]:
"""simple docstring"""
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
A__ = self.fwd_astar.open_nodes.pop(0 )
A__ = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
__lowerCAmelCase , __lowerCAmelCase )
self.fwd_astar.closed_nodes.append(__lowerCAmelCase )
self.bwd_astar.closed_nodes.append(__lowerCAmelCase )
A__ = current_bwd_node
A__ = current_fwd_node
A__ = {
self.fwd_astar: self.fwd_astar.get_successors(__lowerCAmelCase ),
self.bwd_astar: self.bwd_astar.get_successors(__lowerCAmelCase ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(__lowerCAmelCase )
else:
# retrieve the best current path
A__ = astar.open_nodes.pop(
astar.open_nodes.index(__lowerCAmelCase ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(__lowerCAmelCase )
else:
astar.open_nodes.append(__lowerCAmelCase )
return [self.fwd_astar.start.pos]
def a_ ( self : List[str] , __lowerCAmelCase : Node , __lowerCAmelCase : Node ) -> list[TPosition]:
"""simple docstring"""
A__ = self.fwd_astar.retrace_path(__lowerCAmelCase )
A__ = self.bwd_astar.retrace_path(__lowerCAmelCase )
bwd_path.pop()
bwd_path.reverse()
A__ = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
A : Optional[int] = (0, 0)
A : int = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
A : Dict = time.time()
A : Optional[Any] = AStar(init, goal)
A : Optional[int] = a_star.search()
A : Optional[int] = time.time() - start_time
print(F'''AStar execution time = {end_time:f} seconds''')
A : Dict = time.time()
    A : Tuple = BidirectionalAStar(init, goal)
    A : Optional[Any] = bd_a_star.search()
    A : List[Any] = time.time() - bd_start_time
print(F'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
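# Worked example of the two heuristics switched by HEURISTIC above, for a node
# at (x, y) = (1, 2) with goal (4, 6):
from math import sqrt
dx, dy = 1 - 4, 2 - 6
print(abs(dx) + abs(dy))        # 7   -- Manhattan (HEURISTIC == 1)
print(sqrt(dx ** 2 + dy ** 2))  # 5.0 -- Euclidean (HEURISTIC == 0)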
| 274
| 0
|
"""simple docstring"""
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
__lowercase = 299_792_458
# Symbols
__lowercase , __lowercase , __lowercase , __lowercase = symbols('''ct x y z''')
def lowerCAmelCase (__UpperCamelCase : float ):
"""simple docstring"""
if velocity > c:
raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError('''Speed must be greater than or equal to 1!''' )
return velocity / c
def lowerCAmelCase (__UpperCamelCase : float ):
"""simple docstring"""
return 1 / sqrt(1 - beta(__UpperCamelCase ) ** 2 )
def lowerCAmelCase (__UpperCamelCase : float ):
"""simple docstring"""
return np.array(
[
[gamma(__UpperCamelCase ), -gamma(__UpperCamelCase ) * beta(__UpperCamelCase ), 0, 0],
[-gamma(__UpperCamelCase ) * beta(__UpperCamelCase ), gamma(__UpperCamelCase ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def lowerCAmelCase (__UpperCamelCase : float , __UpperCamelCase : np.ndarray | None = None ):
"""simple docstring"""
if event is None:
__UpperCamelCase =np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(__UpperCamelCase ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
__lowercase = transform(29_979_245)
print('''Example of four vector: ''')
print(f'''ct\' = {four_vector[0]}''')
print(f'''x\' = {four_vector[1]}''')
print(f'''y\' = {four_vector[2]}''')
print(f'''z\' = {four_vector[3]}''')
# Substitute symbols with numerical values
__lowercase = {ct: c, x: 1, y: 1, z: 1}
__lowercase = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f'''\n{numerical_vector}''')
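# Numeric sanity check for beta/gamma above (names as referenced inside the
# module; the def lines were mangled in this dump): at v = 0.5c,
# gamma = 1 / sqrt(1 - 0.25) ~= 1.1547.
v = 0.5 * 299_792_458
print(beta(v))   # 0.5
print(gamma(v))  # ~1.1547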
| 85
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowercase = {
'''configuration_conditional_detr''': [
'''CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ConditionalDetrConfig''',
'''ConditionalDetrOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ['''ConditionalDetrFeatureExtractor''']
__lowercase = ['''ConditionalDetrImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConditionalDetrForObjectDetection''',
'''ConditionalDetrForSegmentation''',
'''ConditionalDetrModel''',
'''ConditionalDetrPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 85
| 1
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class a :
def __init__( self :Optional[int] ,__lowercase :Dict ,__lowercase :Optional[int]=1_3 ,__lowercase :Tuple=7 ,__lowercase :Optional[Any]=False ,__lowercase :Dict=True ,__lowercase :Any=False ,__lowercase :str=False ,__lowercase :Tuple=1_9 ,__lowercase :int=3_2 ,__lowercase :List[str]=5 ,__lowercase :int=4 ,__lowercase :str=3_7 ,__lowercase :Union[str, Any]="gelu" ,__lowercase :List[Any]=0.1 ,__lowercase :List[Any]=0.1 ,__lowercase :str=5_1_2 ,__lowercase :Any=1_6 ,__lowercase :Union[str, Any]=2 ,__lowercase :int=0.02 ,__lowercase :int=3 ,__lowercase :List[str]=4 ,__lowercase :Any=None ,):
snake_case__ : str = parent
snake_case__ : Any = batch_size
snake_case__ : Dict = seq_length
snake_case__ : Tuple = is_training
snake_case__ : List[str] = use_input_mask
snake_case__ : Any = use_token_type_ids
snake_case__ : Union[str, Any] = use_labels
snake_case__ : List[str] = vocab_size
snake_case__ : Dict = hidden_size
snake_case__ : Union[str, Any] = num_hidden_layers
snake_case__ : Any = num_attention_heads
snake_case__ : Optional[Any] = intermediate_size
snake_case__ : int = hidden_act
snake_case__ : List[str] = hidden_dropout_prob
snake_case__ : str = attention_probs_dropout_prob
snake_case__ : Tuple = max_position_embeddings
snake_case__ : List[str] = type_vocab_size
snake_case__ : Optional[int] = type_sequence_label_size
snake_case__ : Tuple = initializer_range
snake_case__ : int = num_labels
snake_case__ : str = num_choices
snake_case__ : int = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False
    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip('''Does not support attention outputs''' )
def __lowerCamelCase ( self :Optional[Any] ):
pass
@unittest.skip
def __lowerCamelCase ( self :Optional[int] ):
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def __lowerCamelCase ( self :Optional[int] ):
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def __lowerCamelCase ( self :List[Any] ):
pass
@unittest.skip('''ESMFold does not support passing input embeds!''' )
def __lowerCamelCase ( self :Tuple ):
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __lowerCamelCase ( self :Tuple ):
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __lowerCamelCase ( self :Dict ):
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __lowerCamelCase ( self :int ):
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __lowerCamelCase ( self :Any ):
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __lowerCamelCase ( self :Any ):
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def __lowerCamelCase ( self :Optional[Any] ):
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
def __lowerCamelCase ( self :Dict ):
pass
@unittest.skip('''ESMFold only has one output format.''' )
def __lowerCamelCase ( self :Tuple ):
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
def __lowerCamelCase ( self :Optional[Any] ):
pass
@unittest.skip('''ESMFold does not support input chunking.''' )
def __lowerCamelCase ( self :Optional[int] ):
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
def __lowerCamelCase ( self :Union[str, Any] ):
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def __lowerCamelCase ( self :Tuple ):
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def __lowerCamelCase ( self :List[Any] ):
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def __lowerCamelCase ( self :Optional[Any] ):
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''' )
def __lowerCamelCase ( self :Any ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowerCamelCase ( self :Any ):
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
| 230
|
from math import asin, atan, cos, radians, sin, sqrt, tan
# CONSTANTS per WGS84: https://en.wikipedia.org/wiki/World_Geodetic_System
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Calculate the great-circle distance, in metres, between two points on Earth,
    given their latitudes and longitudes in degrees.
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_2) * cos(phi_1) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
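# Illustrative usage (coordinates are approximate):
#     SAN_FRANCISCO = (37.774856, -122.424227)
#     YOSEMITE = (37.864742, -119.537521)
#     haversine_distance(*SAN_FRANCISCO, *YOSEMITE)  # ~254,000 metres (~254 km)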
| 207
| 0
|
import math
from datetime import datetime, timedelta
def gauss_easter(year):
    """Calculation of the Gregorian Easter date for a given year (Gauss's algorithm)."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
if __name__ == "__main__":
for year in (19_94, 20_00, 20_10, 20_21, 20_23):
__UpperCAmelCase = "will be" if year > datetime.now().year else "was"
print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
| 371
|
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__UpperCAmelCase = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
__UpperCAmelCase = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
__UpperCAmelCase = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
| 257
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
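# Minimal usage sketch (illustrative values, not from the original file):
#
#     config = MegatronBertConfig()  # BERT-large-like defaults: 24 layers, hidden size 1024
#     tiny = MegatronBertConfig(num_hidden_layers=2, hidden_size=128)  # toy variant for tests
#
# Like any PretrainedConfig, it round-trips via config.to_dict() / config.to_json_string().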
| 91
|
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (PNDM stores past model outputs in `ets`)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        kwargs.update(forward_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
| 91
| 1
|
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optiomal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optiomal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optiomal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optiomal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
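# Note: matrix_chain_order() is the classic O(n^3)-time / O(n^2)-space dynamic program
# for matrix-chain multiplication (CLRS 15.2). For the array above, main() prints:
#   No. of Operation required: 15125
#   ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) )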
| 359
|
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Checks to see if a number is a prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Returns the sum of all the primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
print(F"{solution() = }")
| 19
| 0
|
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return

    # exclude the current element, then include it (DFS over the two branches)
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
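# Expected output for the first call (the exclude-branch recurses first, so the empty
# subsequence is printed before any element is included):
#   [], [4], [2], [2, 4], [1], [1, 4], [1, 2], [1, 2, 4], [3], ..., [3, 1, 2, 4]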
| 111
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
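# Illustrative invocation (script and argument names are hypothetical):
#
#     python xla_spawn.py --num_cores 8 path/to/train.py --learning_rate 3e-5
#
# Everything after the training-script path is captured by REMAINDER and forwarded
# untouched, with an extra `--tpu_num_cores 8` appended for the spawned processes.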
| 195
| 0
|
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowerCamelCase : Optional[Any] = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]
    def __init__(
        self,
        fpn_feature_size=256,
        mask_feature_size=256,
        no_object_weight=0.1,
        use_auxiliary_loss=False,
        backbone_config=None,
        decoder_config=None,
        init_std=0.02,
        init_xavier_std=1.0,
        dice_weight=1.0,
        cross_entropy_weight=1.0,
        mask_weight=20.0,
        output_auxiliary_logits=None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)
    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config, decoder_config, **kwargs):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
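# Hedged usage sketch (class names as defined above; the defaults assume a Swin
# backbone and a DETR decoder, matching the fallbacks in __init__):
#
#     config = MaskFormerConfig()
#     custom = MaskFormerConfig.from_backbone_and_decoder_configs(SwinConfig(), DetrConfig())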
| 176
|
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
lowerCamelCase : int = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos, **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 176
| 1
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 103
|
"""simple docstring"""
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
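# Illustrative checks (function names as defined above):
#     is_arithmetic_series([2, 4, 6])  # True  (common difference 2)
#     is_arithmetic_series([2, 4, 7])  # False
#     mean([2, 4, 6])                  # 4.0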
| 109
| 0
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
a : Dict = r'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(a)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True
    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
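# Hedged usage sketch: a RagConfig is normally composed from two sub-configs
# (the checkpoint ids below are illustrative):
#
#     from transformers import AutoConfig
#     question_encoder = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#     generator = AutoConfig.from_pretrained("facebook/bart-large")
#     rag_config = RagConfig.from_question_encoder_generator_configs(question_encoder, generator)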
| 82
|
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
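
# For reference only (not part of the test suite): a minimal sketch of enabling
# RoPE scaling outside the tester, assuming the same `rope_scaling` dict
# attribute that the test above sets on the config:
#
#   config = OpenLlamaConfig()
#   config.rope_scaling = {"type": "linear", "factor": 10.0}
#   model = OpenLlamaModel(config)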
| 82
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 53
|
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
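
# For reference, get_test_dummy_examples() yields (0, {"content": "foo"}),
# (1, {"content": "bar"}), (2, {"content": "foobar"}), and the nested variant
# wraps each string as {"a": {"b": [content]}}.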
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)
    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
| 113
| 0
|
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check the Project Euler 43 divisibility properties on one permutation."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0..n-1 pandigital numbers with the substring-divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
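
# Sanity check (hedged): with the default n = 10 this should reproduce the
# Project Euler 43 answer, 16695334890, by brute force over all 10! permutations.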
if __name__ == "__main__":
print(F'''{solution() = }''')
| 193
|
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
| 193
| 1
|
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
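
# Example: get_pairs(("h", "e", "y")) -> {("h", "e"), ("e", "y")}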
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)
        return " ".join(words)
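    # For a rough picture of the output format: self.bpe("hello") would return
    # space-joined pieces such as "hel@@ lo" (the exact splits depend on the
    # merges file), where "@@ " marks a continuation inside one original word.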
    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
| 170
|
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self):
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 170
| 1
|
import numpy as np
class Cell:
    """One grid node, with the A* bookkeeping fields g, h, and f."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)
class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """Return the in-bounds neighbours of `cell` in the 8-neighbourhood."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    """A* search from `start` to `goal` on `world`; returns the path as a list of positions."""
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))

        if current == goal:
            break

        for n in world.get_neigbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g

            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)

    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
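
# Note: the heuristic above is the *squared* Euclidean distance to the goal, so
# it can overestimate the true remaining cost; that is fine for this toy grid,
# but it is not a strictly admissible A* heuristic.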
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Mark the path cells, just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
| 122
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechTaProcessor
    model_class = SpeechTaForTextToSpeech
    post_processor_class = SpeechTaHifiGan

    inputs = ["text"]
    outputs = ["audio"]
    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
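
# A minimal usage sketch (assuming the agents/tools runtime wires
# encode -> forward -> decode when the tool is called):
#
#   tool = TextToSpeechTool()
#   waveform = tool("hello world")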
| 122
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_transfo_xl""": ["""TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TransfoXLConfig"""],
"""tokenization_transfo_xl""": ["""TransfoXLCorpus""", """TransfoXLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
"""TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AdaptiveEmbedding""",
"""TransfoXLForSequenceClassification""",
"""TransfoXLLMHeadModel""",
"""TransfoXLModel""",
"""TransfoXLPreTrainedModel""",
"""load_tf_weights_in_transfo_xl""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
"""TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAdaptiveEmbedding""",
"""TFTransfoXLForSequenceClassification""",
"""TFTransfoXLLMHeadModel""",
"""TFTransfoXLMainLayer""",
"""TFTransfoXLModel""",
"""TFTransfoXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
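
# With the lazy module in place, importing e.g. `TransfoXLConfig` from this
# package only pulls in the heavy torch/TF submodules on first attribute access.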
| 86
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class A__ ( unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPTaConfig)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPTaLMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPTaLMHeadModel)
    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TaConfig)

            model = TFAutoModelForSeqaSeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeqaSeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTaForConditionalGeneration)

            model = AutoModelForSeqaSeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeqaSeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TaForConditionalGeneration)
    @slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
| 86
| 1
|
def solution(limit: int = 1_000_000) -> int:
    """Count n < limit with exactly ten solutions of x^2 - y^2 - z^2 = n, where
    x, y, z form a decreasing arithmetic progression.

    Writing (x, y, z) = (a + d, a, a - d) gives n = a * (4d - a), hence
    d = (a + n / a) / 4 must be a positive integer.
    """
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # (a + n/a) must be divisible by 4 for d to be an integer
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 requires a > d, and n > 0 requires a < 4d
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(f"{solution() = }")
| 353
|
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 0
|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed or compiled wrappers."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model
def wait_for_everyone():
    PartialState().wait_for_everyone()


def save(obj, f):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple ):
for key, value in source.items():
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
UpperCamelCase :str = destination.setdefault(SCREAMING_SNAKE_CASE__ , {} )
merge_dicts(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
UpperCamelCase :Any = value
return destination
def _A ( SCREAMING_SNAKE_CASE__ : int = None ):
if port is None:
UpperCamelCase :Any = 29500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('''localhost''', port) ) == 0
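# Minimal usage sketch for the helpers above (illustrative only; the reconstructed
# names are assumptions based on how each body uses its arguments):
#
#     with patch_environment(omp_num_threads=1):
#         assert os.environ["OMP_NUM_THREADS"] == "1"
#     merged = merge_dicts({"a": {"b": 1}}, {"a": {"c": 2}})  # -> {"a": {"b": 1, "c": 2}}
#     if is_port_in_use(29500):
#         print("default torch.distributed port is taken")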
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_attention_heads=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        intermediate_size=4,
        num_time_features=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        # Parameter names reconstructed from the attribute assignments below; the exact
        # ordering of the keyword defaults is an assumption.
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length, context_length=self.context_length,
            label_length=self.label_length, lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features, num_static_categorical_features=1,
            cardinality=[self.cardinality], embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )

    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    # The six boolean flags below are reconstructed; the original attribute names were lost
    # in the dump, so this particular set is an assumption.
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()


def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
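# Descriptive note (reconstruction): the downloaded batch is a dict of tensors keyed by
# "past_values", "past_time_features", "past_observed_mask", "static_categorical_features",
# "future_values" and "future_time_features", matching the keyword arguments used below.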
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state

        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
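# Minimal end-to-end sketch of the same forecasting API outside a test harness
# (illustrative only; repo id and batch keys are taken from the tests above):
#
#     model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly")
#     batch = prepare_batch("val-batch.pt")
#     outputs = model.generate(
#         past_values=batch["past_values"],
#         past_time_features=batch["past_time_features"],
#         past_observed_mask=batch["past_observed_mask"],
#         static_categorical_features=batch["static_categorical_features"],
#         future_time_features=batch["future_time_features"],
#     )
#     point_forecast = outputs.sequences.mean(dim=1)  # average over the sampled trajectories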
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2, generator=generator, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds,
            image=image, generator=generator, num_inference_steps=2, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds,
            image=image, num_inference_steps=2, generator=generator, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds,
            image=image, original_image=original_image, generator=generator,
            num_inference_steps=2, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds,
            image=image, mask_image=mask_image, num_inference_steps=2,
            generator=generator, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds,
            image=image, mask_image=mask_image, original_image=original_image,
            generator=generator, num_inference_steps=2, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
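# Note on the pattern above (descriptive, not from the original file): resetting the
# peak-memory counters before each pipeline call makes torch.cuda.max_memory_allocated()
# report the peak for that call alone, which is what the `mem_bytes` assertions check.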
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy
                # transforms it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms
                # it into a space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping(self):  # method name reconstructed
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")
        self.assertTrue(
            context.exception.args[0].startswith("The `backend_tokenizer` provided does not match the expected format.")
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
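# Minimal usage sketch (illustrative; keyword names come from the signature above):
#
#     config = GPTNeoXJapaneseConfig(num_hidden_layers=12, hidden_size=768)
#     print(config.intermediate_multiple_size)  # FFN width is expressed as a multiple of hidden_size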
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class TextInpainting(DiffusionPipeline):  # class name reconstructed from the diffusers community text_inpainting pipeline
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    def __call__(
        self, prompt, image, text, height=512, width=512, num_inference_steps=50, guidance_scale=7.5,
        negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None,
        output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs,
    ):
        # Segment the region described by `text` with CLIPSeg to build the inpainting mask
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet,
            scheduler=self.scheduler, safety_checker=self.safety_checker, feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt, image=image, mask_image=mask_pil, height=height, width=width,
            num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
        )
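# Usage sketch (illustrative only; the checkpoint ids are assumptions, and `init_image`
# is a hypothetical PIL image loaded by the caller):
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "runwayml/stable-diffusion-inpainting",
#         custom_pipeline="text_inpainting",
#         segmentation_model=CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined"),
#         segmentation_processor=CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined"),
#     )
#     result = pipe(prompt="a red couch", image=init_image, text="the sofa").images[0]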
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class DataTrainingArguments:
    # Field names reconstructed from the help strings and the usages in main() below.
    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )


@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
lowercase_ : Union[str, Any] = image_processor.size["""shortest_edge"""]
else:
lowercase_ : str = (image_processor.size["""height"""], image_processor.size["""width"""])
lowercase_ : List[str] = Compose(
[
Lambda(lambda UpperCAmelCase__ : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(UpperCAmelCase__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(UpperCAmelCase__ : List[str] ):
lowercase_ : int = [transforms(UpperCAmelCase__ ) for image in examples[image_column_name]]
return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)
    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
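# Example invocation (illustrative; flags come from the dataclasses above, paths are placeholders):
#
#     python run_mae.py \
#         --dataset_name cifar10 \
#         --output_dir ./vit-mae-demo \
#         --do_train --do_eval \
#         --base_learning_rate 1.5e-4 \
#         --mask_ratio 0.75 --norm_pix_loss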
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
_lowercase : Union[str, Any] = "src/transformers"
_lowercase : str = "docs/source/en"
_lowercase : Union[str, Any] = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text between `start_prompt` and `end_prompt` in `filename`, returning it with its indices."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
_lowercase : int = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_lowercase : str = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_lowercase : Optional[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
_lowercase : int = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
_lowercase : Optional[Any] = direct_transformers_import(TRANSFORMERS_PATH)
def lowerCamelCase ( UpperCAmelCase__ : int ) -> Any:
lowercase_ : str = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , UpperCAmelCase__ )
return [m.group(0 ) for m in matches]
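# Example (added): the lazy regex above splits on lower->UPPER and
# UPPER->UpperLower boundaries, e.g.
#     camel_case_split("TFBertModel") -> ["TF", "Bert", "Model"]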
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple ) -> List[Any]:
lowercase_ : Dict = 2 if text == """✅""" or text == """❌""" else len(UpperCAmelCase__ )
lowercase_ : List[str] = (width - text_length) // 2
lowercase_ : Dict = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def lowerCamelCase ( ) -> Any:
lowercase_ : int = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
lowercase_ : Any = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
lowercase_ : int = {name: config.replace("""Config""" , """""" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
lowercase_ : List[Any] = collections.defaultdict(UpperCAmelCase__ )
lowercase_ : List[str] = collections.defaultdict(UpperCAmelCase__ )
lowercase_ : Any = collections.defaultdict(UpperCAmelCase__ )
lowercase_ : Tuple = collections.defaultdict(UpperCAmelCase__ )
lowercase_ : Optional[int] = collections.defaultdict(UpperCAmelCase__ )
# Let's lookup through all transformers object (once).
for attr_name in dir(UpperCAmelCase__ ):
lowercase_ : Union[str, Any] = None
if attr_name.endswith("""Tokenizer""" ):
lowercase_ : Optional[int] = slow_tokenizers
lowercase_ : Union[str, Any] = attr_name[:-9]
elif attr_name.endswith("""TokenizerFast""" ):
lowercase_ : Optional[Any] = fast_tokenizers
lowercase_ : Dict = attr_name[:-13]
elif _re_tf_models.match(UpperCAmelCase__ ) is not None:
lowercase_ : str = tf_models
lowercase_ : str = _re_tf_models.match(UpperCAmelCase__ ).groups()[0]
elif _re_flax_models.match(UpperCAmelCase__ ) is not None:
lowercase_ : List[str] = flax_models
lowercase_ : int = _re_flax_models.match(UpperCAmelCase__ ).groups()[0]
elif _re_pt_models.match(UpperCAmelCase__ ) is not None:
lowercase_ : Tuple = pt_models
lowercase_ : Optional[int] = _re_pt_models.match(UpperCAmelCase__ ).groups()[0]
if lookup_dict is not None:
while len(UpperCAmelCase__ ) > 0:
if attr_name in model_name_to_prefix.values():
lowercase_ : int = True
break
# Try again after removing the last word in the name
lowercase_ : Optional[Any] = """""".join(camel_case_split(UpperCAmelCase__ )[:-1] )
# Let's build that table!
lowercase_ : Dict = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
lowercase_ : Optional[Any] = ["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
lowercase_ : Union[str, Any] = [len(UpperCAmelCase__ ) + 2 for c in columns]
lowercase_ : int = max([len(UpperCAmelCase__ ) for name in model_names] ) + 2
# Build the table per se
lowercase_ : Tuple = """|""" + """|""".join([_center_text(UpperCAmelCase__ , UpperCAmelCase__ ) for c, w in zip(UpperCAmelCase__ , UpperCAmelCase__ )] ) + """|\n"""
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n"
lowercase_ : int = {True: """✅""", False: """❌"""}
for name in model_names:
lowercase_ : str = model_name_to_prefix[name]
lowercase_ : Any = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(UpperCAmelCase__ , UpperCAmelCase__ ) for l, w in zip(UpperCAmelCase__ , UpperCAmelCase__ )] ) + "|\n"
return table
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any]=False ) -> str:
lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[str] = _find_text_in_file(
filename=os.path.join(UpperCAmelCase__ , """index.md""" ) , start_prompt="""<!--This table is updated automatically from the auto modules""" , end_prompt="""<!-- End table-->""" , )
lowercase_ : Dict = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(UpperCAmelCase__ , """index.md""" ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"""The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" )
if __name__ == "__main__":
_lowercase : Any = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
_lowercase : Optional[Any] = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 21
| 0
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
a :List[Any] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def _lowercase ( __lowerCAmelCase ) -> List[str]:
for pegasus_name, hf_name in PATTERNS:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = k.replace(__lowerCAmelCase , __lowerCAmelCase )
return k
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> PegasusForConditionalGeneration:
SCREAMING_SNAKE_CASE__ : str = DEFAULTS.copy()
cfg_kwargs.update(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = PegasusConfig(**__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = PegasusForConditionalGeneration(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : str = torch_model.model.state_dict()
SCREAMING_SNAKE_CASE__ : Any = {}
for k, v in tf_weights.items():
SCREAMING_SNAKE_CASE__ : Optional[int] = rename_state_dict_key(__lowerCAmelCase )
if new_k not in sd:
raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' )
if "dense" in k or "proj" in new_k:
SCREAMING_SNAKE_CASE__ : Tuple = v.T
SCREAMING_SNAKE_CASE__ : Any = torch.tensor(__lowerCAmelCase , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
# make sure embedding.padding_idx is respected
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] )
SCREAMING_SNAKE_CASE__ : Optional[int] = mapping["""shared.weight"""]
SCREAMING_SNAKE_CASE__ : Any = mapping["""shared.weight"""]
SCREAMING_SNAKE_CASE__ : int = {k: torch.zeros_like(__lowerCAmelCase ) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping}
mapping.update(**__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = torch_model.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = [
k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""]
]
assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], F'''no matches found for the following tf keys {extra}'''
return torch_model
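# Note (added): the `v.T` transpose above is needed because TF stores dense
# kernels as (in_features, out_features) while torch.nn.Linear weights are
# laid out as (out_features, in_features).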
def _lowercase ( __lowerCAmelCase="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
SCREAMING_SNAKE_CASE__ : List[Any] = tf.train.list_variables(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = {}
SCREAMING_SNAKE_CASE__ : Any = ["""Adafactor""", """global_step"""]
for name, shape in tqdm(__lowerCAmelCase , desc="""converting tf checkpoint to dict""" ):
SCREAMING_SNAKE_CASE__ : Tuple = any(pat in name for pat in ignore_name )
if skip_key:
continue
SCREAMING_SNAKE_CASE__ : str = tf.train.load_variable(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = array
return tf_weights
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> Any:
# save tokenizer first
SCREAMING_SNAKE_CASE__ : Any = Path(__lowerCAmelCase ).parent.name
SCREAMING_SNAKE_CASE__ : Dict = task_specific_params[F'''summarization_{dataset}''']["""max_position_embeddings"""]
SCREAMING_SNAKE_CASE__ : Tuple = PegasusTokenizer.from_pretrained("""sshleifer/pegasus""" , model_max_length=__lowerCAmelCase )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(__lowerCAmelCase )
# convert model
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_tf_weights_as_numpy(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = task_specific_params[F'''summarization_{dataset}''']
if dataset == "large":
SCREAMING_SNAKE_CASE__ : Tuple = task_specific_params
SCREAMING_SNAKE_CASE__ : str = convert_pegasus(__lowerCAmelCase , __lowerCAmelCase )
torch_model.save_pretrained(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = torch_model.state_dict()
sd.pop("""model.decoder.embed_positions.weight""" )
sd.pop("""model.encoder.embed_positions.weight""" )
torch.save(__lowerCAmelCase , Path(__lowerCAmelCase ) / """pytorch_model.bin""" )
if __name__ == "__main__":
a :List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
a :Optional[Any] = parser.parse_args()
if args.save_dir is None:
a :List[Any] = Path(args.tf_ckpt_path).parent.name
a :Optional[Any] = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 132
|
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or castable to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than len(bwt_string)."
        )
    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
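# Round-trip sketch (added for illustration; the helper name is hypothetical):
# reverse_bwt applied to the output of bwt_transform must recover the input.
def _bwt_round_trip_demo(word: str = "banana") -> bool:
    transformed = bwt_transform(word)
    return reverse_bwt(transformed["bwt_string"], transformed["idx_original_string"]) == word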
| 132
| 1
|
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pad ragged sequences to `sequence_length` with a scalar or pair padding value."""
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]
    return out_tensor.tolist()


def is_punctuation(char):
    """Check whether `char` is a punctuation character."""
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
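# Example sketch (added; the demo name is hypothetical): right-padding two
# ragged sequences to length 3 with padding value -1.
def _padding_tensor_demo() -> None:
    assert padding_tensor([[1, 2], [3]], -1, "right", 3) == [[1, 2, -1], [3, -1, -1]]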
@dataclass
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
__A = 42
__A = True
__A = None
__A = None
__A = -100
__A = "pt"
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
import torch
a = "label" if "label" in features[0].keys() else "labels"
a = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
a = self.tokenizer.pad(
lowerCamelCase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , )
if labels is None:
return batch
a = torch.tensor(batch["entity_ids"] ).shape[1]
a = self.tokenizer.padding_side
if padding_side == "right":
a = [
list(lowerCamelCase_ ) + [self.label_pad_token_id] * (sequence_length - len(lowerCamelCase_ )) for label in labels
]
else:
a = [
[self.label_pad_token_id] * (sequence_length - len(lowerCamelCase_ )) + list(lowerCamelCase_ ) for label in labels
]
a = [feature["ner_tags"] for feature in features]
a = padding_tensor(lowerCamelCase_ , -1 , lowerCamelCase_ , lowerCamelCase_ )
a = [feature["original_entity_spans"] for feature in features]
a = padding_tensor(lowerCamelCase_ , (-1, -1) , lowerCamelCase_ , lowerCamelCase_ )
a = {k: torch.tensor(v , dtype=torch.int64 ) for k, v in batch.items()}
return batch
| 71
|
def solution(pence: int = 200) -> int:
    """Count the number of ways to make `pence` from standard British coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
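# Note (added): the inner loop is the unbounded coin-change recurrence
# ways[i] += ways[i - coin]; keeping coins in the outer loop counts
# combinations rather than ordered sequences, e.g. for 5 pence:
# {5}, {2,2,1}, {2,1,1,1}, {1,1,1,1,1}.
assert solution(5) == 4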
| 71
| 1
|
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))
    graph = [[float("inf") for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0
    # src and dst are indices that must be within range(v);
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight
    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected output from the vertex, edge and src, dst, weight inputs
# 0 INF INF
# INF 0 2
# INF 1 0
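# Self-contained sketch (added; avoids input()): with edges 0 -> 1 of weight 1
# and 1 -> 2 of weight 2, the shortest 0 -> 2 distance is 3.
def _floyd_warshall_demo() -> None:
    inf = float("inf")
    g = [[0.0, 1.0, inf], [inf, 0.0, 2.0], [inf, inf, 0.0]]
    dist, _ = floyd_warshall(g, 3)
    assert dist[0][2] == 3.0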
| 235
|
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = (DDPMScheduler,)
def __lowercase ( self , **_a ) -> Any:
_a : List[Any] = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**_a )
return config
def __lowercase ( self ) -> Any:
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=_a )
def __lowercase ( self ) -> List[Any]:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_a , beta_end=_a )
def __lowercase ( self ) -> List[str]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_a )
def __lowercase ( self ) -> Optional[Any]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_a )
def __lowercase ( self ) -> str:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_a )
def __lowercase ( self ) -> Dict:
self.check_over_configs(thresholding=_a )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_a , prediction_type=_a , sample_max_value=_a , )
def __lowercase ( self ) -> Optional[Any]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def __lowercase ( self ) -> int:
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=_a )
def __lowercase ( self ) -> int:
_a : int = self.scheduler_classes[0]
_a : List[Any] = self.get_scheduler_config()
_a : Dict = scheduler_class(**_a )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1e-5
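# Note (added): for the default "fixed_small" variance type, _get_variance(t)
# computes beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t
# (Eq. 7 of the DDPM paper), which is why the value is ~0 at t=0 and close to
# beta_end (0.02) at t=999 in the asserts above.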
def __lowercase ( self ) -> Tuple:
_a : int = self.scheduler_classes[0]
_a : int = self.get_scheduler_config()
_a : int = scheduler_class(**_a )
_a : Optional[int] = len(_a )
_a : Optional[Any] = self.dummy_model()
_a : str = self.dummy_sample_deter
_a : Union[str, Any] = torch.manual_seed(0 )
for t in reversed(range(_a ) ):
# 1. predict noise residual
_a : str = model(_a , _a )
# 2. predict previous mean of sample x_t-1
_a : Optional[int] = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_a : List[Any] = pred_prev_sample
_a : str = torch.sum(torch.abs(_a ) )
_a : Optional[Any] = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def __lowercase ( self ) -> Optional[Any]:
_a : Optional[int] = self.scheduler_classes[0]
_a : Optional[Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
_a : Union[str, Any] = scheduler_class(**_a )
_a : Dict = len(_a )
_a : int = self.dummy_model()
_a : Tuple = self.dummy_sample_deter
_a : List[Any] = torch.manual_seed(0 )
for t in reversed(range(_a ) ):
# 1. predict noise residual
_a : Dict = model(_a , _a )
# 2. predict previous mean of sample x_t-1
_a : int = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_a : str = pred_prev_sample
_a : str = torch.sum(torch.abs(_a ) )
_a : Tuple = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def __lowercase ( self ) -> Dict:
_a : Union[str, Any] = self.scheduler_classes[0]
_a : Tuple = self.get_scheduler_config()
_a : Any = scheduler_class(**_a )
_a : Optional[Any] = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=_a )
_a : Optional[int] = scheduler.timesteps
for i, timestep in enumerate(_a ):
if i == len(_a ) - 1:
_a : Dict = -1
else:
_a : Tuple = timesteps[i + 1]
_a : Optional[Any] = scheduler.previous_timestep(_a )
_a : Optional[Any] = prev_t.item()
self.assertEqual(_a , _a )
def __lowercase ( self ) -> Optional[Any]:
_a : Dict = self.scheduler_classes[0]
_a : List[str] = self.get_scheduler_config()
_a : Tuple = scheduler_class(**_a )
_a : str = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(_a , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=_a )
def __lowercase ( self ) -> str:
_a : List[str] = self.scheduler_classes[0]
_a : List[str] = self.get_scheduler_config()
_a : Dict = scheduler_class(**_a )
_a : Union[str, Any] = [1_0_0, 8_7, 5_0, 1, 0]
_a : Optional[Any] = len(_a )
with self.assertRaises(_a , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=_a , timesteps=_a )
def __lowercase ( self ) -> Optional[int]:
_a : Dict = self.scheduler_classes[0]
_a : Union[str, Any] = self.get_scheduler_config()
_a : int = scheduler_class(**_a )
_a : str = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_a , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=_a )
| 235
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __magic_name__ ( snake_case ):
UpperCamelCase_ :List[Any] = """dandelin/vilt-b32-finetuned-vqa"""
UpperCamelCase_ :Dict = (
"""This is a tool that answers a question about an image. It takes an input named `image` which should be the """
"""image containing the information, as well as a `question` which should be the question in English. It """
"""returns a text that is the answer to the question."""
)
UpperCamelCase_ :Optional[int] = """image_qa"""
UpperCamelCase_ :int = AutoProcessor
UpperCamelCase_ :Tuple = AutoModelForVisualQuestionAnswering
UpperCamelCase_ :Optional[int] = ["""image""", """text"""]
UpperCamelCase_ :Tuple = ["""text"""]
def __init__( self , *_lowercase , **_lowercase )-> Union[str, Any]:
requires_backends(self , ["vision"] )
super().__init__(*_lowercase , **_lowercase )
def UpperCAmelCase_ ( self , _lowercase , _lowercase )-> str:
return self.pre_processor(_lowercase , _lowercase , return_tensors="pt" )
def UpperCAmelCase_ ( self , _lowercase )-> str:
with torch.no_grad():
return self.model(**_lowercase ).logits
def UpperCAmelCase_ ( self , _lowercase )-> List[Any]:
UpperCamelCase_ = outputs.argmax(-1 ).item()
return self.model.config.id2label[idx]
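# Usage sketch (added): the class above corresponds to what upstream
# Transformers calls ImageQuestionAnsweringTool; the image path and question
# below are illustrative only.
#     tool = ImageQuestionAnsweringTool()
#     answer = tool(image=Image.open("photo.png"), question="What is shown?")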
| 60
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __magic_name__ :
@staticmethod
def UpperCAmelCase_ ( *_lowercase , **_lowercase )-> Optional[int]:
pass
@is_pipeline_test
@require_vision
class __magic_name__ ( unittest.TestCase ):
@require_torch
def UpperCAmelCase_ ( self )-> int:
UpperCamelCase_ = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , )
UpperCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
UpperCamelCase_ = image_classifier(_lowercase , candidate_labels=["a", "b", "c"] )
# The scores are so close that floating-point error makes the ordering
# non-deterministic across Python and torch versions.
self.assertIn(
nested_simplify(_lowercase ) , [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
] , )
UpperCamelCase_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [
[
{"score": 0.333, "label": ANY(_lowercase )},
{"score": 0.333, "label": ANY(_lowercase )},
{"score": 0.333, "label": ANY(_lowercase )},
],
[
{"score": 0.333, "label": ANY(_lowercase )},
{"score": 0.333, "label": ANY(_lowercase )},
{"score": 0.333, "label": ANY(_lowercase )},
],
[
{"score": 0.333, "label": ANY(_lowercase )},
{"score": 0.333, "label": ANY(_lowercase )},
{"score": 0.333, "label": ANY(_lowercase )},
],
[
{"score": 0.333, "label": ANY(_lowercase )},
{"score": 0.333, "label": ANY(_lowercase )},
{"score": 0.333, "label": ANY(_lowercase )},
],
[
{"score": 0.333, "label": ANY(_lowercase )},
{"score": 0.333, "label": ANY(_lowercase )},
{"score": 0.333, "label": ANY(_lowercase )},
],
] , )
@require_tf
def UpperCAmelCase_ ( self )-> Optional[Any]:
UpperCamelCase_ = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" )
UpperCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
UpperCamelCase_ = image_classifier(_lowercase , candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(_lowercase ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , )
UpperCamelCase_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [
[
{"score": 0.333, "label": ANY(_lowercase )},
{"score": 0.333, "label": ANY(_lowercase )},
{"score": 0.333, "label": ANY(_lowercase )},
],
[
{"score": 0.333, "label": ANY(_lowercase )},
{"score": 0.333, "label": ANY(_lowercase )},
{"score": 0.333, "label": ANY(_lowercase )},
],
[
{"score": 0.333, "label": ANY(_lowercase )},
{"score": 0.333, "label": ANY(_lowercase )},
{"score": 0.333, "label": ANY(_lowercase )},
],
[
{"score": 0.333, "label": ANY(_lowercase )},
{"score": 0.333, "label": ANY(_lowercase )},
{"score": 0.333, "label": ANY(_lowercase )},
],
[
{"score": 0.333, "label": ANY(_lowercase )},
{"score": 0.333, "label": ANY(_lowercase )},
{"score": 0.333, "label": ANY(_lowercase )},
],
] , )
@slow
@require_torch
def UpperCAmelCase_ ( self )-> Optional[Any]:
UpperCamelCase_ = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , )
# This is an image of 2 cats with remotes and no planes
UpperCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
UpperCamelCase_ = image_classifier(_lowercase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(_lowercase ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
UpperCamelCase_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
@slow
@require_tf
def UpperCAmelCase_ ( self )-> List[str]:
UpperCamelCase_ = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" )
# This is an image of 2 cats with remotes and no planes
UpperCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
UpperCamelCase_ = image_classifier(_lowercase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(_lowercase ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
UpperCamelCase_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
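# Background note (added): the zero-shot pipeline embeds the image and one text
# prompt per candidate label with CLIP, then softmaxes the image-text
# similarity logits, which is why the tiny random model above gives every
# label a score of roughly 1/3.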
| 60
| 1
|
UNIT_SYMBOL = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert a length between metric units, e.g. kilometers to meters."""
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
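# Worked example (added): "km" maps to exponent 3 and "m" to exponent 0, so
# converting 4 km to metres multiplies by 10 ** (3 - 0).
assert length_conversion(4, "kilometer", "meter") == 4000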
| 193
|
def counting_sort(collection):
    """Sort a collection of integers in O(n + k) time, where k is the value range."""
    # if the collection is empty, return an empty list
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how many times each number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to beginning, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered


def counting_sort_string(string):
    """Sort the characters of a string with counting sort."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt"
a__: Dict = input('Enter numbers separated by a comma:\n').strip()
a__: Any = [int(item) for item in user_input.split(',')]
print(counting_sort(unsorted))
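# Note (added): counting sort is stable (equal keys keep their input order),
# which is what lets counting_sort_string reuse it on character codes.
assert counting_sort([4, 1, 3, 1]) == [1, 1, 3, 4]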
| 193
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class __SCREAMING_SNAKE_CASE ( __lowercase):
_SCREAMING_SNAKE_CASE : List[Any] = '''microsoft/speecht5_tts'''
_SCREAMING_SNAKE_CASE : Any = (
'''This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '''
'''text to read (in English) and returns a waveform object containing the sound.'''
)
_SCREAMING_SNAKE_CASE : int = '''text_reader'''
_SCREAMING_SNAKE_CASE : List[str] = SpeechT5Processor
_SCREAMING_SNAKE_CASE : Optional[int] = SpeechT5ForTextToSpeech
_SCREAMING_SNAKE_CASE : List[Any] = SpeechT5HifiGan
_SCREAMING_SNAKE_CASE : Optional[int] = ['''text''']
_SCREAMING_SNAKE_CASE : List[Any] = ['''audio''']
def UpperCamelCase__ ( self ):
"""simple docstring"""
if self.post_processor is None:
lowerCAmelCase__ = 'microsoft/speecht5_hifigan'
super().setup()
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase=None ):
"""simple docstring"""
lowerCAmelCase__ = self.pre_processor(text=_UpperCamelCase , return_tensors='pt' , truncation=_UpperCamelCase )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError('Datasets needs to be installed if not passing speaker embeddings.' )
lowerCAmelCase__ = load_dataset('Matthijs/cmu-arctic-xvectors' , split='validation' )
lowerCAmelCase__ = torch.tensor(embeddings_dataset[73_05]['xvector'] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def UpperCamelCase__ ( self , _UpperCamelCase ):
"""simple docstring"""
with torch.no_grad():
return self.model.generate_speech(**_UpperCamelCase )
def UpperCamelCase__ ( self , _UpperCamelCase ):
"""simple docstring"""
with torch.no_grad():
return self.post_processor(_UpperCamelCase ).cpu().detach()
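# Note (added): encode() tokenizes the text and, if no speaker embedding is
# passed, loads an x-vector (index 7305 of the CMU Arctic x-vectors dataset);
# forward() generates a spectrogram with SpeechT5, and decode() vocodes it
# into a waveform with the HiFi-GAN post-processor.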
| 359
|
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
__snake_case : Tuple = """sshleifer/mar_enro_6_3_student"""
class __SCREAMING_SNAKE_CASE ( __lowercase):
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().setUp()
lowerCAmelCase__ = cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' , extract_compressed_file=_UpperCamelCase , )
lowerCAmelCase__ = F"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ):
"""simple docstring"""
MarianMTModel.from_pretrained(_UpperCamelCase )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = {
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
lowerCAmelCase__ = (self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
lowerCAmelCase__ = bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
for k, v in env_vars_to_replace.items():
lowerCAmelCase__ = bash_script.replace(_UpperCamelCase , str(_UpperCamelCase ) )
lowerCAmelCase__ = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
lowerCAmelCase__ = F"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
lowerCAmelCase__ = ['finetune.py'] + bash_script.split() + args
with patch.object(_UpperCamelCase , 'argv' , _UpperCamelCase ):
lowerCAmelCase__ = argparse.ArgumentParser()
lowerCAmelCase__ = pl.Trainer.add_argparse_args(_UpperCamelCase )
lowerCAmelCase__ = SummarizationModule.add_model_specific_args(_UpperCamelCase , os.getcwd() )
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = main(_UpperCamelCase )
# Check metrics
lowerCAmelCase__ = load_json(model.metrics_save_path )
lowerCAmelCase__ = metrics['val'][0]
lowerCAmelCase__ = metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F"val_avg_{model.val_metric}"] , _UpperCamelCase )
self.assertGreater(last_step_stats['val_avg_gen_time'] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
lowerCAmelCase__ = os.listdir(_UpperCamelCase )
lowerCAmelCase__ = [x for x in contents if x.endswith('.ckpt' )][0]
lowerCAmelCase__ = os.path.join(args.output_dir , _UpperCamelCase )
lowerCAmelCase__ = torch.load(_UpperCamelCase , map_location='cpu' )
lowerCAmelCase__ = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
lowerCAmelCase__ = {os.path.basename(_UpperCamelCase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class __SCREAMING_SNAKE_CASE ( __lowercase):
@timeout_decorator.timeout(6_00 )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = F"{self.test_file_dir_str}/test_data/wmt_en_ro"
lowerCAmelCase__ = {
'--fp16_opt_level=O1': '',
'$MAX_LEN': 1_28,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
lowerCAmelCase__ = (
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
lowerCAmelCase__ = bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
lowerCAmelCase__ = bash_script.replace('--fp16 ' , ' ' )
for k, v in env_vars_to_replace.items():
lowerCAmelCase__ = bash_script.replace(_UpperCamelCase , str(_UpperCamelCase ) )
lowerCAmelCase__ = self.get_auto_remove_tmp_dir()
lowerCAmelCase__ = bash_script.replace('--fp16' , '' )
lowerCAmelCase__ = 6
lowerCAmelCase__ = (
['distillation.py']
+ bash_script.split()
+ [
F"--output_dir={output_dir}",
'--gpus=1',
'--learning_rate=1e-3',
F"--num_train_epochs={epochs}",
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(_UpperCamelCase , 'argv' , _UpperCamelCase ):
lowerCAmelCase__ = argparse.ArgumentParser()
lowerCAmelCase__ = pl.Trainer.add_argparse_args(_UpperCamelCase )
lowerCAmelCase__ = SummarizationDistiller.add_model_specific_args(_UpperCamelCase , os.getcwd() )
lowerCAmelCase__ = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
lowerCAmelCase__ = distill_main(_UpperCamelCase )
# Check metrics
lowerCAmelCase__ = load_json(model.metrics_save_path )
lowerCAmelCase__ = metrics['val'][0]
lowerCAmelCase__ = metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F"val_avg_{model.val_metric}"] , _UpperCamelCase )
# check lightning ckpt can be loaded and has a reasonable statedict
lowerCAmelCase__ = os.listdir(_UpperCamelCase )
lowerCAmelCase__ = [x for x in contents if x.endswith('.ckpt' )][0]
lowerCAmelCase__ = os.path.join(args.output_dir , _UpperCamelCase )
lowerCAmelCase__ = torch.load(_UpperCamelCase , map_location='cpu' )
lowerCAmelCase__ = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
lowerCAmelCase__ = {os.path.basename(_UpperCamelCase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
| 122
| 0
|
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
_snake_case = logging.get_logger(__name__)
_snake_case = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
_snake_case = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
_snake_case = {
'''allenai/led-base-16384''': 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowercase_( ):
'''simple docstring'''
lowerCamelCase : Any = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
lowerCamelCase : List[str] = bs[:]
lowerCamelCase : List[Any] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(a__ )
cs.append(2**8 + n )
n += 1
lowerCamelCase : List[str] = [chr(a__ ) for n in cs]
return dict(zip(a__ , a__ ) )
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : List[Any] = set()
lowerCamelCase : str = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCamelCase : Optional[int] = char
return pairs
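# Example (added): get_pairs(("l", "o", "w")) -> {("l", "o"), ("o", "w")},
# i.e. the adjacent symbol pairs that BPE ranks to choose the next merge.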
class UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__A : str = VOCAB_FILES_NAMES
__A : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__A : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : Any = ["""input_ids""", """attention_mask"""]
def __init__( self , __A , __A , __A="replace" , __A="<s>" , __A="</s>" , __A="</s>" , __A="<s>" , __A="<unk>" , __A="<pad>" , __A="<mask>" , __A=False , **__A , ):
"""simple docstring"""
lowerCamelCase : Dict = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else bos_token
lowerCamelCase : Dict = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else eos_token
lowerCamelCase : List[Any] = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else sep_token
lowerCamelCase : Dict = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else cls_token
lowerCamelCase : List[str] = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else unk_token
lowerCamelCase : Any = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase : List[Any] = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token
super().__init__(
errors=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , **__UpperCamelCase , )
with open(__UpperCamelCase , encoding="utf-8" ) as vocab_handle:
lowerCamelCase : Dict = json.load(__UpperCamelCase )
lowerCamelCase : Optional[Any] = {v: k for k, v in self.encoder.items()}
lowerCamelCase : Any = errors # how to handle errors in decoding
lowerCamelCase : Optional[Any] = bytes_to_unicode()
lowerCamelCase : Tuple = {v: k for k, v in self.byte_encoder.items()}
with open(__UpperCamelCase , encoding="utf-8" ) as merges_handle:
lowerCamelCase : int = merges_handle.read().split("\n" )[1:-1]
lowerCamelCase : int = [tuple(merge.split() ) for merge in bpe_merges]
lowerCamelCase : Optional[int] = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
lowerCamelCase : List[Any] = {}
lowerCamelCase : Union[str, Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCamelCase : int = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def _snake_case ( self ):
"""simple docstring"""
return len(self.encoder )
def _snake_case ( self ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self , __A ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
lowerCamelCase : str = tuple(__UpperCamelCase )
lowerCamelCase : int = get_pairs(__UpperCamelCase )
if not pairs:
return token
while True:
lowerCamelCase : int = min(__UpperCamelCase , key=lambda __A : self.bpe_ranks.get(__UpperCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase , lowerCamelCase : Tuple = bigram
lowerCamelCase : List[Any] = []
lowerCamelCase : Dict = 0
while i < len(__UpperCamelCase ):
try:
lowerCamelCase : List[Any] = word.index(__UpperCamelCase , __UpperCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase : int = j
if word[i] == first and i < len(__UpperCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase : Union[str, Any] = tuple(__UpperCamelCase )
lowerCamelCase : List[str] = new_word
if len(__UpperCamelCase ) == 1:
break
else:
lowerCamelCase : Optional[Any] = get_pairs(__UpperCamelCase )
lowerCamelCase : Optional[Any] = " ".join(__UpperCamelCase )
lowerCamelCase : Union[str, Any] = word
return word
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : Optional[int] = []
for token in re.findall(self.pat , __UpperCamelCase ):
lowerCamelCase : Any = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__UpperCamelCase ).split(" " ) )
return bpe_tokens
def _snake_case ( self , __A ):
"""simple docstring"""
return self.encoder.get(__UpperCamelCase , self.encoder.get(self.unk_token ) )
def _snake_case ( self , __A ):
"""simple docstring"""
return self.decoder.get(__UpperCamelCase )
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : List[str] = "".join(__UpperCamelCase )
lowerCamelCase : Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _snake_case ( self , __A , __A = None ):
"""simple docstring"""
if not os.path.isdir(__UpperCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase : Optional[Any] = os.path.join(
__UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase : List[str] = os.path.join(
__UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__UpperCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__UpperCamelCase , ensure_ascii=__UpperCamelCase ) + "\n" )
lowerCamelCase : str = 0
with open(__UpperCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __A : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
lowerCamelCase : Optional[Any] = token_index
writer.write(" ".join(__UpperCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def _snake_case ( self , __A , __A = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase : Union[str, Any] = [self.cls_token_id]
lowerCamelCase : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self , __A , __A = None , __A = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCamelCase )) + [1]
return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase )) + [1]
def _snake_case ( self , __A , __A = None ):
"""simple docstring"""
lowerCamelCase : Any = [self.sep_token_id]
lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self , __A , __A=False , **__A ):
"""simple docstring"""
lowerCamelCase : int = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__UpperCamelCase ) > 0 and not text[0].isspace()):
lowerCamelCase : List[Any] = " " + text
return (text, kwargs)
def _snake_case ( self , __A , __A = None , __A = PaddingStrategy.DO_NOT_PAD , __A = None , __A = None , ):
"""simple docstring"""
lowerCamelCase : str = super()._pad(
encoded_inputs=__UpperCamelCase , max_length=__UpperCamelCase , padding_strategy=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , )
# Load from model defaults
if return_attention_mask is None:
lowerCamelCase : int = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowerCamelCase : int = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
lowerCamelCase : Optional[int] = len(encoded_inputs["global_attention_mask"] ) != len(__UpperCamelCase )
if needs_to_be_padded:
lowerCamelCase : Dict = len(__UpperCamelCase ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowerCamelCase : int = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
lowerCamelCase : Any = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
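# Note (added): in LED, a `global_attention_mask` value of 0 means local
# attention and 1 means global attention, so padded positions are filled with
# -1 above to mark them as neither.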
| 283
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    r"""
    Constructs a LayoutXLM processor which combines a LayoutLMv2 image processor and a LayoutXLM tokenizer into a
    single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text=None,
        text_pair=None,
        boxes=None,
        word_labels=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ):
        """
        Runs the image processor on `images` (optionally applying OCR), then the tokenizer on the resulting words
        and boxes, and returns a single encoding with the pixel values added back in.
        """
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
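# Hedged usage sketch for LayoutXLMProcessor above. The checkpoint id is the public
# "microsoft/layoutxlm-base" and "document.png" is a placeholder path; running this also
# needs the OCR dependency (pytesseract) that LayoutLMv2ImageProcessor uses by default.
if __name__ == "__main__":
    from PIL import Image

    processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
    image = Image.open("document.png").convert("RGB")
    encoding = processor(image, return_tensors="pt")  # OCR supplies words and boxes
    print(encoding.keys())  # input_ids, attention_mask, bbox, image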
| 122
| 0
|
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 ,0 ,-1 ) ,range(0 ,3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
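# Toy self-check of the fused-qkv split used above: a (3*dim, dim) projection slices into
# equal query/key/value blocks in that order. Illustrative helper only; it touches no
# MaskFormer weights and is not part of the original conversion script.
def _check_qkv_split(dim: int = 4) -> None:
    fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)
    assert torch.equal(torch.cat([q, k, v]), fused)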
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
# We verify the converted weights on a sample COCO image
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(
    model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="maskformer-swin-tiny-ade",
        type=str,
        help="Name of the MaskFormer model you'd like to convert",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
        type=str,
        help="Path to the original state dict (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_maskformer_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
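# Example invocation (the script filename is assumed from the transformers conversion-script
# convention; all paths are illustrative):
#
#   python convert_maskformer_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade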
| 277
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_DESCRIPTION = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_KWARGS_DESCRIPTION = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
| 277
| 1
|
"""simple docstring"""
def lowercase__ ( snake_case_ :float , snake_case_ :float , snake_case_ :int ):
if principal <= 0:
raise Exception('''Principal borrowed must be > 0''' )
if rate_per_annum < 0:
raise Exception('''Rate of interest must be >= 0''' )
if years_to_repay <= 0 or not isinstance(snake_case_ , snake_case_ ):
raise Exception('''Years to repay must be an integer > 0''' )
# Yearly rate is divided by 12 to get monthly rate
__UpperCAmelCase = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
__UpperCAmelCase = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
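# Worked example for the formula above (illustrative figures): a 25000 principal at 8% per
# annum over 3 years gives r = 0.08 / 12 and n = 36 payments, so
# EMI = 25000 * r * (1 + r)**36 / ((1 + r)**36 - 1) ≈ 783.41 per month.
assert 783 < equated_monthly_installments(25_000, 0.08, 3) < 784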
if __name__ == "__main__":
import doctest
doctest.testmod()
| 332
|
"""simple docstring"""
from __future__ import annotations
class _UpperCAmelCase :
def __init__( self : Tuple , _lowercase : str , _lowercase : str ):
__UpperCAmelCase , __UpperCAmelCase = text, pattern
__UpperCAmelCase , __UpperCAmelCase = len(_lowercase ), len(_lowercase )
def a ( self : Optional[int] , _lowercase : str ):
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def a ( self : int , _lowercase : int ):
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def a ( self : Optional[Any] ):
# searches pattern in text and returns index positions
__UpperCAmelCase = []
for i in range(self.textLen - self.patLen + 1 ):
__UpperCAmelCase = self.mismatch_in_text(_lowercase )
if mismatch_index == -1:
positions.append(_lowercase )
else:
__UpperCAmelCase = self.match_in_pattern(self.text[mismatch_index] )
__UpperCAmelCase = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
_lowercase : str = 'ABAABA'
_lowercase : Tuple = 'AB'
_lowercase : Dict = BoyerMooreSearch(text, pattern)
_lowercase : Any = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
| 332
| 1
|
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel

logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
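# Quick sanity check for entropy() above (illustrative helper, not in the original script):
# a uniform distribution over 4 outcomes must have entropy log(4).
def _check_entropy() -> None:
    uniform = torch.full((1, 4), 0.25)
    assert torch.allclose(entropy(uniform), torch.log(torch.tensor(4.0)))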
def print_2d_tensor(tensor):
    """Print a 2D tensor, one logged line per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Computes per-head attention entropy and importance scores according to http://arxiv.org/abs/1905.10650."""
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Masks heads step by step, least important first, until the score drops below the masking threshold."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prunes the masked heads for real and compares score and speed before/after pruning."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
| 365
|
'''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager to hide the terminal cursor and restore it on exit."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
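# Example usage of the `hide` context manager above (assumes an interactive terminal):
# the cursor stays hidden for the duration of the block and is restored even on error.
if __name__ == "__main__":
    import time

    with hide():
        time.sleep(1)
    print("cursor restored")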
| 21
| 0
|
"""simple docstring"""
from __future__ import annotations
def p_series(nth_term, power):
    """Return a list of the P-Series terms 1, 1/2^p, ..., 1/n^p as strings."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input('Enter the last number (nth term) of the P-Series'))
    power = int(input('Enter the power for P-Series'))
print('Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p')
print(p_series(nth_term, power))
| 98
|
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the dataset bunch into its feature matrix and target vector
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load the Iris dataset
    iris = load_iris()

    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )

    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 227
| 0
|
"""simple docstring"""
def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three implementations with int values of increasing length."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 195
|
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}


class EfficientFormerConfig(PretrainedConfig):
    r"""
    Configuration class to store the configuration of an EfficientFormer model. Defaults mirror the
    EfficientFormer-L1 architecture.
    """

    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
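# Minimal usage sketch (not in the original source file): since the defaults above mirror
# the EfficientFormer-L1 variant, an argument-free construction shows the stage layout.
if __name__ == "__main__":
    config = EfficientFormerConfig()
    print(config.model_type, config.depths, config.hidden_sizes)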
| 195
| 1
|
def fizz_buzz(number: int, iterations: int) -> str:
    """Plays FizzBuzz, returning the space-separated terms from `number` up to `iterations`."""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += " "
    return out
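# Expected behaviour of fizz_buzz above, checked on the classic first 15 numbers:
# multiples of 3 -> "Fizz", of 5 -> "Buzz", of both -> "FizzBuzz".
assert fizz_buzz(1, 15).split() == [
    "1", "2", "Fizz", "4", "Buzz", "Fizz", "7", "8", "Fizz",
    "Buzz", "11", "Fizz", "13", "14", "FizzBuzz",
]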
if __name__ == "__main__":
import doctest
doctest.testmod()
| 280
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
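# With the lazy structure above, importing the package is cheap: each submodule is loaded
# only on first attribute access. Illustrative (assumes transformers is installed):
#
#   import transformers.models.llama as llama
#   config = llama.LlamaConfig()  # first access triggers the real import of configuration_llama
#   print(config.model_type)      # "llama"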
| 21
| 0
|
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
_SCREAMING_SNAKE_CASE : List[Any] = """docs/source/en/_toctree.yml"""
def _lowerCAmelCase ( UpperCAmelCase : Optional[Any] ):
'''simple docstring'''
UpperCamelCase__ : List[Any] =defaultdict(UpperCAmelCase )
UpperCamelCase__ : Union[str, Any] =[]
UpperCamelCase__ : Optional[int] =[]
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'''local''': doc['''local'''], '''title''': doc['''title''']} )
else:
new_doc_list.append(UpperCAmelCase )
UpperCamelCase__ : List[str] =new_doc_list
UpperCamelCase__ : Optional[Any] =[key for key, value in counts.items() if value > 1]
UpperCamelCase__ : Any =[]
for duplicate_key in duplicates:
UpperCamelCase__ : Optional[Any] =list({doc['''title'''] for doc in doc_list if doc['''local'''] == duplicate_key} )
if len(UpperCAmelCase ) > 1:
raise ValueError(
F'''{duplicate_key} is present several times in the documentation table of content at '''
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if '''local''' not in counts or counts[doc['''local''']] == 1] )
UpperCamelCase__ : Union[str, Any] =sorted(UpperCAmelCase , key=lambda UpperCAmelCase : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(UpperCAmelCase ) > 1:
raise ValueError('''{doc_list} has two \'overview\' docs which is not allowed.''' )
overview_doc.extend(UpperCAmelCase )
# Sort
return overview_doc
def _lowerCAmelCase ( UpperCAmelCase : Optional[int]=False ):
'''simple docstring'''
with open(UpperCAmelCase , encoding='''utf-8''' ) as f:
UpperCamelCase__ : int =yaml.safe_load(f.read() )
# Get to the API doc
UpperCamelCase__ : Dict =0
while content[api_idx]["title"] != "API":
api_idx += 1
UpperCamelCase__ : Tuple =content[api_idx]['''sections''']
# Then to the model doc
UpperCamelCase__ : Dict =0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
UpperCamelCase__ : List[Any] =api_doc[scheduler_idx]['''sections''']
UpperCamelCase__ : Union[str, Any] =clean_doc_toc(UpperCAmelCase )
UpperCamelCase__ : Dict =False
if new_scheduler_doc != scheduler_doc:
UpperCamelCase__ : List[Any] =True
if overwrite:
UpperCamelCase__ : Union[str, Any] =new_scheduler_doc
if diff:
if overwrite:
UpperCamelCase__ : Tuple =api_doc
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(UpperCAmelCase , allow_unicode=UpperCAmelCase ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
def _lowerCAmelCase ( UpperCAmelCase : Union[str, Any]=False ):
'''simple docstring'''
with open(UpperCAmelCase , encoding='''utf-8''' ) as f:
UpperCamelCase__ : Dict =yaml.safe_load(f.read() )
# Get to the API doc
UpperCamelCase__ : Union[str, Any] =0
while content[api_idx]["title"] != "API":
api_idx += 1
UpperCamelCase__ : Tuple =content[api_idx]['''sections''']
# Then to the model doc
UpperCamelCase__ : List[str] =0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
UpperCamelCase__ : Tuple =False
UpperCamelCase__ : List[str] =api_doc[pipeline_idx]['''sections''']
UpperCamelCase__ : Union[str, Any] =[]
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
UpperCamelCase__ : Tuple =pipeline_doc['''section''']
UpperCamelCase__ : Dict =clean_doc_toc(UpperCAmelCase )
if overwrite:
UpperCamelCase__ : Optional[Any] =new_sub_pipeline_doc
new_pipeline_docs.append(UpperCAmelCase )
# sort overall pipeline doc
UpperCamelCase__ : Optional[Any] =clean_doc_toc(UpperCAmelCase )
if new_pipeline_docs != pipeline_docs:
UpperCamelCase__ : Any =True
if overwrite:
UpperCamelCase__ : Optional[int] =new_pipeline_docs
if diff:
if overwrite:
UpperCamelCase__ : List[Any] =api_doc
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(UpperCAmelCase , allow_unicode=UpperCAmelCase ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
_SCREAMING_SNAKE_CASE : str = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
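# Illustrative invocation (the script location is an assumption; adjust to where this file lives):
#   python utils/check_doc_toc.py                      # raises if the ToC is unsorted
#   python utils/check_doc_toc.py --fix_and_overwrite  # rewrites _toctree.yml in place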
| 157
|
"""simple docstring"""
import os
import time
import numpy as np
import onnxruntime as ort
_SCREAMING_SNAKE_CASE : List[Any] = """1"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = """0"""
_SCREAMING_SNAKE_CASE : List[str] = """1"""
_SCREAMING_SNAKE_CASE : Optional[int] = ort.SessionOptions()
_SCREAMING_SNAKE_CASE : Tuple = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("""Create inference session...""")
_SCREAMING_SNAKE_CASE : int = ["""TensorrtExecutionProvider""", """CUDAExecutionProvider"""]
_SCREAMING_SNAKE_CASE : Dict = ort.InferenceSession("""model.onnx""", sess_options=sess_opt, providers=execution_provider)
_SCREAMING_SNAKE_CASE : Optional[int] = ort.RunOptions()
_SCREAMING_SNAKE_CASE : Tuple = 1_2_8
_SCREAMING_SNAKE_CASE : List[Any] = 1
_SCREAMING_SNAKE_CASE : Union[str, Any] = np.ones((batch, sequence), dtype=np.intaa)
_SCREAMING_SNAKE_CASE : str = np.ones((batch, sequence), dtype=np.intaa)
_SCREAMING_SNAKE_CASE : int = np.ones((batch, sequence), dtype=np.intaa)
print("""Warm up phase...""")
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("""Start inference...""")
_SCREAMING_SNAKE_CASE : Any = time.time()
_SCREAMING_SNAKE_CASE : str = 2_0_0_0
_SCREAMING_SNAKE_CASE : List[Any] = {}
for iter in range(max_iters):
_SCREAMING_SNAKE_CASE : Any = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("""Average Inference Time = {:.3f} ms""".format((time.time() - start_time) * 1_0_0_0 / max_iters))
| 157
| 1
|
"""simple docstring"""
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_=0 ):
'''simple docstring'''
return sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : x[column] )
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=float("inf" ) ):
'''simple docstring'''
for i in range(points_counts - 1 ):
for j in range(i + 1 , lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__SCREAMING_SNAKE_CASE = current_dis
return min_dis
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=float("inf" ) ):
'''simple docstring'''
for i in range(min(6 , points_counts - 1 ) , lowerCAmelCase_ ):
for j in range(max(0 , i - 6 ) , lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__SCREAMING_SNAKE_CASE = current_dis
return min_dis
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
if points_counts <= 3:
return dis_between_closest_pair(lowerCAmelCase_ , lowerCAmelCase_ )
# recursion
__SCREAMING_SNAKE_CASE = points_counts // 2
__SCREAMING_SNAKE_CASE = closest_pair_of_points_sqr(
lowerCAmelCase_ , points_sorted_on_y[:mid] , lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = closest_pair_of_points_sqr(
lowerCAmelCase_ , points_sorted_on_y[mid:] , points_counts - mid )
__SCREAMING_SNAKE_CASE = min(lowerCAmelCase_ , lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = dis_between_closest_in_strip(
lowerCAmelCase_ , len(lowerCAmelCase_ ) , lowerCAmelCase_ )
return min(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = column_based_sort(lowerCAmelCase_ , column=0 )
__SCREAMING_SNAKE_CASE = column_based_sort(lowerCAmelCase_ , column=1 )
return (
closest_pair_of_points_sqr(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
) ** 0.5
if __name__ == "__main__":
a__ : List[str] = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)]
print('''Distance:''', closest_pair_of_points(points, len(points)))
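# Cross-check sketch (our addition): the divide-and-conquer answer should match a
# brute-force scan over all pairs, which makes a handy property-style test.
#
#   import itertools, math
#   brute = min(math.dist(p, q) for p, q in itertools.combinations(points, 2))
#   assert abs(brute - closest_pair_of_points(points, len(points))) < 1e-9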
| 54
|
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("""|b1"""),
np.dtype("""|u1"""),
np.dtype("""<u2"""),
np.dtype(""">u2"""),
np.dtype("""<i2"""),
np.dtype(""">i2"""),
np.dtype("""<u4"""),
np.dtype(""">u4"""),
np.dtype("""<i4"""),
np.dtype(""">i4"""),
np.dtype("""<f4"""),
np.dtype(""">f4"""),
np.dtype("""<f8"""),
np.dtype(""">f8"""),
]
@dataclass
class Image:
    """Image feature: reads image data from a file path or raw bytes and decodes it into a PIL image."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode an example into a format understood by Arrow: a dict with "path" and "bytes" keys."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode an example: open the image from its local path, a remote URL, or its raw bytes."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, return the feature itself; otherwise flatten it into a dictionary."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array to the Image storage type: a struct with "bytes" and "path" fields."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed image files into the Arrow array: read each "path" into "bytes" so the table is self-contained."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible, else PNG/TIFF compression."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}


def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(objs):
    """Encode a list of objects (paths, arrays, or PIL images) into image dicts with "path"/"bytes" keys."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
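# Usage sketch (our addition; relies on the public `datasets` API that this module backs):
#
#   from datasets import Dataset, Image
#   ds = Dataset.from_dict({"image": ["path/to/cat.png"]}).cast_column("image", Image())
#   ds[0]["image"]  # decoded lazily through Image.decode_example -> PIL.Image.Image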
| 186
| 0
|
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    # Attribute names follow TokenizerTesterMixin's configuration hooks.
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def test_encodings_from_sample_data(self):
        """Assert that the computed tokens match the hard-coded reference ones."""
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2_175, 23_714, 73_173, 144_252, 2], [77, 132_619, 3_478, 368, 109_586, 35_433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, INPUT_SENTENCES)
    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )
    def test_encodings_from_xnli_dataset(self):
        """Assert that multilingual XNLI samples survive an encode/decode round trip."""
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
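# Running these tests (illustrative; the path assumes the usual transformers test layout):
#   python -m pytest tests/models/bloom/test_tokenization_bloom.py -k padding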
| 350
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    """Configuration class to store the configuration of a MarkupLM model."""

    model_type = "markuplm"
    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1_024,
        tag_pad_id=216,
        subs_pad_id=1_001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
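# Usage sketch (our addition): instantiating with defaults yields a configuration
# similar to microsoft/markuplm-base.
#
#   config = MarkupLMConfig()
#   config.max_depth, config.xpath_unit_hidden_size  # -> (50, 32)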
| 331
| 0
|