code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
from ...configuration_utils import PretrainedConfig
class _snake_case ( UpperCAmelCase_ ):
    """Configuration class for the ``bert-generation`` model type.

    NOTE(review): identifiers in this file look machine-obfuscated
    (``_snake_case``, ``SCREAMING_SNAKE_CASE_``, ``lowercase__``).
    ``UpperCAmelCase_`` is presumably ``PretrainedConfig`` (the only
    import above) — TODO confirm against the upstream source.
    """

    # Model-type key used by the auto-config machinery.
    __lowerCAmelCase : str = 'bert-generation'

    def __init__( self , SCREAMING_SNAKE_CASE_=5_03_58 , SCREAMING_SNAKE_CASE_=10_24 , SCREAMING_SNAKE_CASE_=24 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=40_96 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_="absolute" , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ):
        """Build the config; defaults follow bert-generation (vocab 50358,
        hidden 1024, 24 layers, 16 heads, FFN 4096, max positions 512).

        NOTE(review): every parameter shares one obfuscated name (invalid
        Python as written), and the ``lowercase__`` assignments below bind
        a throwaway local instead of ``self.<attr>`` — the original
        presumably stored each argument on ``self``. TODO confirm.
        """
        # Forward the special-token ids and remaining kwargs to the base config.
        super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
        lowercase__ : Dict = vocab_size
        lowercase__ : List[Any] = hidden_size
        lowercase__ : Union[str, Any] = num_hidden_layers
        lowercase__ : Union[str, Any] = num_attention_heads
        lowercase__ : Dict = hidden_act
        lowercase__ : Optional[int] = intermediate_size
        lowercase__ : Union[str, Any] = hidden_dropout_prob
        lowercase__ : int = attention_probs_dropout_prob
        lowercase__ : Any = max_position_embeddings
        lowercase__ : Union[str, Any] = initializer_range
        lowercase__ : Dict = layer_norm_eps
        lowercase__ : List[Any] = position_embedding_type
        lowercase__ : Dict = use_cache
| 12 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class _snake_case :
    """Helper that builds tiny random GPT-J configs/inputs for the Flax tests.

    NOTE(review): identifiers are machine-obfuscated; this is instantiated
    below as ``FlaxGPTJModelTester(self)``, so it presumably mirrors the
    upstream class of that name — TODO confirm. Several right-hand sides
    reference names (``parent``, ``config``, ``model`` …) that the
    obfuscation no longer binds; the originals were ``self.<attr>``
    assignments and properly-named locals.
    """

    def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=14 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=0.0_2 , ):
        """Record the (tiny) model hyper-parameters shared by all tests."""
        lowercase__ : str = parent
        lowercase__ : Optional[int] = batch_size
        lowercase__ : Optional[int] = seq_length
        lowercase__ : Union[str, Any] = is_training
        lowercase__ : Any = use_input_mask
        lowercase__ : Optional[int] = use_token_type_ids
        lowercase__ : Optional[Any] = use_labels
        lowercase__ : Optional[int] = vocab_size
        lowercase__ : Optional[Any] = hidden_size
        lowercase__ : Any = rotary_dim
        lowercase__ : Optional[Any] = num_hidden_layers
        lowercase__ : Tuple = num_attention_heads
        lowercase__ : Tuple = intermediate_size
        lowercase__ : List[str] = hidden_act
        lowercase__ : Optional[Any] = hidden_dropout_prob
        lowercase__ : int = attention_probs_dropout_prob
        lowercase__ : Any = max_position_embeddings
        lowercase__ : Optional[int] = initializer_range
        # scope is unset; bos/eos/pad token ids all map to the last vocab entry.
        lowercase__ : Optional[int] = None
        lowercase__ : str = vocab_size - 1
        lowercase__ : Any = vocab_size - 1
        lowercase__ : Dict = vocab_size - 1

    def lowercase__ ( self):
        """Create a random (config, input_ids, attention_mask) triple."""
        lowercase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        lowercase__ : Any = None
        if self.use_input_mask:
            lowercase__ : Dict = random_attention_mask([self.batch_size, self.seq_length])
        lowercase__ : List[Any] = GPTJConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=SCREAMING_SNAKE_CASE_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
        return (config, input_ids, input_mask)

    def lowercase__ ( self):
        """Repackage the prepared inputs into the common-test inputs dict."""
        lowercase__ : Optional[int] = self.prepare_config_and_inputs()
        lowercase__ , lowercase__ , lowercase__ : Optional[Any] = config_and_inputs
        lowercase__ : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
        return config, inputs_dict

    def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
        """Check that cached (incremental) decoding matches a full forward pass."""
        # Decode up to 20 positions using the KV cache.
        lowercase__ : Tuple = 20
        lowercase__ : int = model_class_name(SCREAMING_SNAKE_CASE_)
        lowercase__ : Optional[Any] = model.init_cache(input_ids.shape[0] , SCREAMING_SNAKE_CASE_)
        lowercase__ : Dict = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""")
        lowercase__ : Tuple = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1))
        # First pass: feed all but the last token, populating the cache.
        lowercase__ : List[str] = model(
            input_ids[:, :-1] , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , )
        lowercase__ : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""")
        # Second pass: feed only the last token with the populated cache.
        lowercase__ : str = model(
            input_ids[:, -1:] , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=outputs_cache.past_key_values , position_ids=SCREAMING_SNAKE_CASE_ , )
        lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE_)
        # Compare the last position's first 5 logits between cached and
        # full runs; they must agree to within 1e-3.
        lowercase__ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}')

    def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
        """Same cache-consistency check, but with an explicit attention mask padded to the decode length."""
        lowercase__ : Union[str, Any] = 20
        lowercase__ : List[Any] = model_class_name(SCREAMING_SNAKE_CASE_)
        # Extend the mask with zeros out to max_decoder_length so cached
        # decoding sees the same padding as the full pass.
        lowercase__ : Dict = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))] , axis=-1 , )
        lowercase__ : Dict = model.init_cache(input_ids.shape[0] , SCREAMING_SNAKE_CASE_)
        lowercase__ : Optional[Any] = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1))
        lowercase__ : Any = model(
            input_ids[:, :-1] , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , )
        lowercase__ : int = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""")
        lowercase__ : Tuple = model(
            input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , )
        lowercase__ : str = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_)
        lowercase__ : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}')
@require_flax
class _snake_case ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
    """Flax GPT-J model test suite.

    NOTE(review): the two ``UpperCAmelCase_`` bases are presumably
    ``FlaxModelTesterMixin`` and ``FlaxGenerationTesterMixin`` (imported
    above) — TODO confirm. Many call arguments below are the obfuscated
    placeholder ``SCREAMING_SNAKE_CASE_``; the originals passed the
    unpacked config/inputs variables.
    """

    # Model classes under test / generator classes (empty when Flax is unavailable).
    __lowerCAmelCase : Dict = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    __lowerCAmelCase : str = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def lowercase__ ( self):
        """Set up the shared model-tester helper."""
        lowercase__ : List[str] = FlaxGPTJModelTester(self)

    def lowercase__ ( self):
        """Cached decoding must match the full forward pass for every model class."""
        for model_class_name in self.all_model_classes:
            lowercase__ , lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)

    def lowercase__ ( self):
        """Same cache check, with an explicit attention mask."""
        for model_class_name in self.all_model_classes:
            lowercase__ , lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)

    @tooslow
    def lowercase__ ( self):
        """End-to-end left-padded batch generation against the 6B hub checkpoint."""
        lowercase__ : List[Any] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""")
        lowercase__ : List[str] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_)
        lowercase__ : Dict = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""")
        # Greedy decoding (sampling disabled) so the expected strings are deterministic.
        lowercase__ : Optional[Any] = False
        lowercase__ : List[str] = model.config.eos_token_id
        # JIT-compile generate once, then run the padded batch through it.
        lowercase__ : List[Any] = jax.jit(model.generate)
        lowercase__ : Tuple = jit_generate(
            inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id).sequences
        lowercase__ : List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_)
        # Reference continuations for the two prompts above.
        lowercase__ : Tuple = [
            """Hello this is a long string of text.\n\nI'm trying to get the text of the""",
            """Hey, I'm a little late to the party. I'm going to""",
        ]
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)

    @is_pt_flax_cross_test
    def lowercase__ ( self):
        """PyTorch -> Flax equivalence: convert PT weights to Flax, compare outputs, then round-trip through save_pretrained/from_pretrained(from_pt=True)."""
        lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                lowercase__ : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
                lowercase__ : Any = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                lowercase__ : int = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                lowercase__ : str = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
                lowercase__ , lowercase__ : Dict = pt_inputs["""input_ids"""].shape
                # Randomize where each row's attention mask flips so padding
                # handling is exercised; the loop body's four assignments were
                # originally mask[batch_idx, :start_index] = 0 / 1 writes on
                # both the PT and Flax input dicts (obfuscated away here).
                lowercase__ : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,))
                for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE_):
                    lowercase__ : str = 0
                    lowercase__ : List[Any] = 1
                    lowercase__ : Dict = 0
                    lowercase__ : Any = 1
                lowercase__ : List[Any] = pt_model_class(SCREAMING_SNAKE_CASE_).eval()
                lowercase__ : Optional[int] = model_class(SCREAMING_SNAKE_CASE_ , dtype=jnp.floataa)
                lowercase__ : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , SCREAMING_SNAKE_CASE_)
                lowercase__ : List[Any] = fx_state
                with torch.no_grad():
                    lowercase__ : Optional[int] = pt_model(**SCREAMING_SNAKE_CASE_).to_tuple()
                lowercase__ : Dict = fx_model(**SCREAMING_SNAKE_CASE_).to_tuple()
                self.assertEqual(len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""")
                # Only the final position is compared, to 4e-2 tolerance.
                for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2)
                # Round-trip: save the PT model and reload it as Flax.
                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(SCREAMING_SNAKE_CASE_)
                    lowercase__ : List[str] = model_class.from_pretrained(SCREAMING_SNAKE_CASE_ , from_pt=SCREAMING_SNAKE_CASE_)
                lowercase__ : str = fx_model_loaded(**SCREAMING_SNAKE_CASE_).to_tuple()
                self.assertEqual(
                    len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""")
                for fx_output_loaded, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
                    self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2)

    @is_pt_flax_cross_test
    def lowercase__ ( self):
        """Flax -> PyTorch equivalence: load Flax weights into PT, compare outputs, then round-trip through from_pretrained(from_flax=True)."""
        lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                lowercase__ : Tuple = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
                lowercase__ : str = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                lowercase__ : int = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                lowercase__ : Optional[int] = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
                lowercase__ : str = pt_model_class(SCREAMING_SNAKE_CASE_).eval()
                lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ , dtype=jnp.floataa)
                lowercase__ : Optional[int] = load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE_ , fx_model.params)
                lowercase__ , lowercase__ : str = pt_inputs["""input_ids"""].shape
                # Same mask-randomization scheme as the PT->Flax test above.
                lowercase__ : List[Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,))
                for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE_):
                    lowercase__ : Tuple = 0
                    lowercase__ : int = 1
                    lowercase__ : str = 0
                    lowercase__ : str = 1
                # make sure weights are tied in PyTorch
                pt_model.tie_weights()
                with torch.no_grad():
                    lowercase__ : Dict = pt_model(**SCREAMING_SNAKE_CASE_).to_tuple()
                lowercase__ : Optional[Any] = fx_model(**SCREAMING_SNAKE_CASE_).to_tuple()
                self.assertEqual(len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""")
                for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2)
                # Round-trip: save the Flax model and reload it as PyTorch.
                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(SCREAMING_SNAKE_CASE_)
                    lowercase__ : Tuple = pt_model_class.from_pretrained(SCREAMING_SNAKE_CASE_ , from_flax=SCREAMING_SNAKE_CASE_)
                with torch.no_grad():
                    lowercase__ : Tuple = pt_model_loaded(**SCREAMING_SNAKE_CASE_).to_tuple()
                self.assertEqual(
                    len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""")
                for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2)

    @tooslow
    def lowercase__ ( self):
        """Every model class must load from the 6B hub checkpoint and run a minimal forward pass."""
        for model_class_name in self.all_model_classes:
            lowercase__ : Any = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""")
            lowercase__ : int = model(np.ones((1, 1)))
            self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
| 12 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class lowerCamelCase :
    """Helper that builds tiny random LayoutLM configs/inputs for the TF tests.

    NOTE(review): identifiers are machine-obfuscated; this is instantiated
    below as ``TFLayoutLMModelTester(self)``, so it presumably mirrors the
    upstream class of that name — TODO confirm. ``SCREAMING_SNAKE_CASE__``
    assignments were originally ``self.<attr> = arg`` / named locals.
    """

    def __init__( self : List[str], _UpperCAmelCase : str, _UpperCAmelCase : Tuple=1_3, _UpperCAmelCase : Any=7, _UpperCAmelCase : int=True, _UpperCAmelCase : Union[str, Any]=True, _UpperCAmelCase : List[str]=True, _UpperCAmelCase : List[Any]=True, _UpperCAmelCase : Union[str, Any]=9_9, _UpperCAmelCase : Dict=3_2, _UpperCAmelCase : Optional[Any]=2, _UpperCAmelCase : int=4, _UpperCAmelCase : int=3_7, _UpperCAmelCase : Optional[int]="gelu", _UpperCAmelCase : Dict=0.1, _UpperCAmelCase : Any=0.1, _UpperCAmelCase : Tuple=5_1_2, _UpperCAmelCase : List[str]=1_6, _UpperCAmelCase : List[Any]=2, _UpperCAmelCase : int=0.02, _UpperCAmelCase : Dict=3, _UpperCAmelCase : Optional[Any]=4, _UpperCAmelCase : Tuple=None, _UpperCAmelCase : List[Any]=1_0_0_0, ) -> str:
        """Record the (tiny) model hyper-parameters; range_bbox=1000 bounds the box coordinates."""
        SCREAMING_SNAKE_CASE__ : Dict = parent
        SCREAMING_SNAKE_CASE__ : List[str] = batch_size
        SCREAMING_SNAKE_CASE__ : Tuple = seq_length
        SCREAMING_SNAKE_CASE__ : Any = is_training
        SCREAMING_SNAKE_CASE__ : Dict = use_input_mask
        SCREAMING_SNAKE_CASE__ : Optional[Any] = use_token_type_ids
        SCREAMING_SNAKE_CASE__ : int = use_labels
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = vocab_size
        SCREAMING_SNAKE_CASE__ : List[str] = hidden_size
        SCREAMING_SNAKE_CASE__ : List[str] = num_hidden_layers
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_attention_heads
        SCREAMING_SNAKE_CASE__ : List[Any] = intermediate_size
        SCREAMING_SNAKE_CASE__ : List[str] = hidden_act
        SCREAMING_SNAKE_CASE__ : Tuple = hidden_dropout_prob
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE__ : Dict = max_position_embeddings
        SCREAMING_SNAKE_CASE__ : Tuple = type_vocab_size
        SCREAMING_SNAKE_CASE__ : Any = type_sequence_label_size
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = initializer_range
        SCREAMING_SNAKE_CASE__ : Optional[Any] = num_labels
        SCREAMING_SNAKE_CASE__ : Tuple = num_choices
        SCREAMING_SNAKE_CASE__ : int = scope
        SCREAMING_SNAKE_CASE__ : Any = range_bbox

    def A_ ( self : List[str] ) -> Optional[int]:
        """Create random config + inputs (ids, legal bboxes, masks, labels)."""
        SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        # convert bbox to numpy since TF does not support item assignment
        SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox ).numpy()
        # Ensure that bbox is legal: swap coordinates so x0<=x1 and y0<=y1.
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    SCREAMING_SNAKE_CASE__ : List[str] = bbox[i, j, 3]
                    SCREAMING_SNAKE_CASE__ : Optional[Any] = bbox[i, j, 1]
                    SCREAMING_SNAKE_CASE__ : Dict = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    SCREAMING_SNAKE_CASE__ : str = bbox[i, j, 2]
                    SCREAMING_SNAKE_CASE__ : str = bbox[i, j, 0]
                    SCREAMING_SNAKE_CASE__ : Tuple = t
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.convert_to_tensor(_UpperCAmelCase )
        SCREAMING_SNAKE_CASE__ : List[str] = None
        if self.use_input_mask:
            SCREAMING_SNAKE_CASE__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
        SCREAMING_SNAKE_CASE__ : List[str] = None
        if self.use_token_type_ids:
            SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        SCREAMING_SNAKE_CASE__ : int = None
        SCREAMING_SNAKE_CASE__ : Optional[Any] = None
        SCREAMING_SNAKE_CASE__ : str = None
        if self.use_labels:
            SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size], self.type_sequence_label_size )
            SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size], self.num_choices )
        SCREAMING_SNAKE_CASE__ : Any = LayoutLMConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def A_ ( self : str, _UpperCAmelCase : str, _UpperCAmelCase : Any, _UpperCAmelCase : List[str], _UpperCAmelCase : List[Any], _UpperCAmelCase : Tuple, _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : List[Any], _UpperCAmelCase : Optional[Any] ) -> Tuple:
        """Base model: run three input combinations and check output shapes."""
        SCREAMING_SNAKE_CASE__ : int = TFLayoutLMModel(config=_UpperCAmelCase )
        SCREAMING_SNAKE_CASE__ : Dict = model(_UpperCAmelCase, _UpperCAmelCase, attention_mask=_UpperCAmelCase, token_type_ids=_UpperCAmelCase )
        SCREAMING_SNAKE_CASE__ : int = model(_UpperCAmelCase, _UpperCAmelCase, token_type_ids=_UpperCAmelCase )
        SCREAMING_SNAKE_CASE__ : str = model(_UpperCAmelCase, _UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )

    def A_ ( self : Any, _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : List[Any], _UpperCAmelCase : str, _UpperCAmelCase : Dict, _UpperCAmelCase : List[str], _UpperCAmelCase : Tuple, _UpperCAmelCase : Optional[int], _UpperCAmelCase : List[Any] ) -> Dict:
        """Masked-LM head: logits must be (batch, seq, vocab)."""
        SCREAMING_SNAKE_CASE__ : int = TFLayoutLMForMaskedLM(config=_UpperCAmelCase )
        SCREAMING_SNAKE_CASE__ : str = model(_UpperCAmelCase, _UpperCAmelCase, attention_mask=_UpperCAmelCase, token_type_ids=_UpperCAmelCase, labels=_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )

    def A_ ( self : Tuple, _UpperCAmelCase : List[str], _UpperCAmelCase : Any, _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Optional[int], _UpperCAmelCase : Optional[int], _UpperCAmelCase : int, _UpperCAmelCase : int, _UpperCAmelCase : Tuple ) -> List[str]:
        """Sequence-classification head: logits must be (batch, num_labels)."""
        SCREAMING_SNAKE_CASE__ : str = self.num_labels
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = TFLayoutLMForSequenceClassification(config=_UpperCAmelCase )
        SCREAMING_SNAKE_CASE__ : Tuple = model(_UpperCAmelCase, _UpperCAmelCase, attention_mask=_UpperCAmelCase, token_type_ids=_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )

    def A_ ( self : Dict, _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : int, _UpperCAmelCase : Any, _UpperCAmelCase : Tuple, _UpperCAmelCase : int, _UpperCAmelCase : List[Any], _UpperCAmelCase : Any ) -> int:
        """Token-classification head: logits must be (batch, seq, num_labels)."""
        SCREAMING_SNAKE_CASE__ : int = self.num_labels
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = TFLayoutLMForTokenClassification(config=_UpperCAmelCase )
        SCREAMING_SNAKE_CASE__ : int = model(_UpperCAmelCase, _UpperCAmelCase, attention_mask=_UpperCAmelCase, token_type_ids=_UpperCAmelCase, labels=_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )

    def A_ ( self : Any, _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : str, _UpperCAmelCase : Tuple, _UpperCAmelCase : List[str], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Optional[int], _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
        """QA head: start/end logits must each be (batch, seq)."""
        SCREAMING_SNAKE_CASE__ : List[str] = TFLayoutLMForQuestionAnswering(config=_UpperCAmelCase )
        SCREAMING_SNAKE_CASE__ : Dict = model(_UpperCAmelCase, _UpperCAmelCase, attention_mask=_UpperCAmelCase, token_type_ids=_UpperCAmelCase )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )

    def A_ ( self : Tuple ) -> Dict:
        """Bundle config and the dict of model inputs used by the common tests.

        NOTE(review): the parenthesized unpack below originally destructured
        all eight prepare_config_and_inputs() values; obfuscation collapsed
        it to a single name — TODO confirm against upstream.
        """
        SCREAMING_SNAKE_CASE__ : int = self.prepare_config_and_inputs()
        (
            SCREAMING_SNAKE_CASE__
        ) : Tuple = config_and_inputs
        SCREAMING_SNAKE_CASE__ : Dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class lowerCamelCase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
    """TF LayoutLM model test suite.

    NOTE(review): the ``__lowerCamelCase`` bases are presumably
    ``TFModelTesterMixin`` and ``PipelineTesterMixin`` (imported above)
    — TODO confirm.
    """

    # All TF LayoutLM model classes under test (empty tuple without TF).
    UpperCAmelCase_ = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline-task -> model-class mapping for the pipeline mixin.
    UpperCAmelCase_ = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # NOTE(review): obfuscation removed these flag names; from the mixin's
    # usual layout they are likely test toggles plus an onnx opset — TODO confirm.
    UpperCAmelCase_ = False
    UpperCAmelCase_ = True
    UpperCAmelCase_ = 10

    def A_ ( self : int ) -> Any:
        """Set up the model tester and the shared ConfigTester."""
        SCREAMING_SNAKE_CASE__ : List[str] = TFLayoutLMModelTester(self )
        SCREAMING_SNAKE_CASE__ : Optional[int] = ConfigTester(self, config_class=_UpperCAmelCase, hidden_size=3_7 )

    def A_ ( self : Optional[int] ) -> Tuple:
        """Run the shared configuration sanity checks."""
        self.config_tester.run_common_tests()

    def A_ ( self : Dict ) -> Any:
        """Base model shape test."""
        SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_UpperCAmelCase )

    def A_ ( self : Optional[int] ) -> Optional[int]:
        """Masked-LM head shape test."""
        SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )

    def A_ ( self : Optional[Any] ) -> Tuple:
        """Sequence-classification head shape test."""
        SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )

    def A_ ( self : Optional[int] ) -> str:
        """Token-classification head shape test."""
        SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )

    def A_ ( self : Any ) -> str:
        """Question-answering head shape test."""
        SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )

    @slow
    def A_ ( self : Any ) -> Union[str, Any]:
        """Load the first pretrained checkpoint from the hub archive list."""
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE__ : Tuple = TFLayoutLMModel.from_pretrained(_UpperCAmelCase )
            self.assertIsNotNone(_UpperCAmelCase )

    @unittest.skip("Onnx compliancy broke with TF 2.10" )
    def A_ ( self : List[Any] ) -> int:
        """Intentionally skipped ONNX-export test (see skip reason)."""
        pass
def _a ( ) -> Optional[Any]:
    """Return a fixed two-example LayoutLM batch:
    (input_ids, attention_mask, bbox, token_type_ids, labels).

    NOTE(review): the integration tests below call this as
    ``prepare_layoutlm_batch_inputs()``; the obfuscated name ``_a`` would
    break those calls, and the final ``return`` references names never
    bound in this body — both are obfuscation artifacts, TODO confirm
    against the upstream file. There is also a dangling ``# fmt: on``
    with no matching ``# fmt: off``.
    """
    # Token ids for two 25-token sequences.
    SCREAMING_SNAKE_CASE__ : Dict = tf.convert_to_tensor([[1_01,10_19,10_14,10_16,10_37,1_28_49,47_47,10_04,1_42_46,22_78,54_39,45_24,50_02,29_30,21_93,29_30,43_41,32_08,10_05,10_55,21_71,28_48,1_13_00,35_31,1_02],[1_01,40_70,40_34,70_20,10_24,30_58,10_15,10_13,28_61,10_13,60_70,1_92_74,27_72,62_05,2_78_14,1_61_47,1_61_47,43_43,20_47,1_02_83,1_09_69,1_43_89,10_12,23_38,1_02]] ) # noqa: E231
    # All positions attended (mask of ones).
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
    # Per-token bounding boxes (x0, y0, x1, y1), one 4-vector per token.
    SCREAMING_SNAKE_CASE__ : Dict = tf.convert_to_tensor([[[0,0,0,0],[4_23,2_37,4_40,2_51],[4_27,2_72,4_41,2_87],[4_19,1_15,4_37,1_29],[9_61,8_85,9_92,9_12],[2_56,38,3_30,58],[2_56,38,3_30,58],[3_36,42,3_53,57],[3_60,39,4_01,56],[3_60,39,4_01,56],[4_11,39,4_71,59],[4_79,41,5_28,59],[5_33,39,6_30,60],[67,1_13,1_34,1_31],[1_41,1_15,2_09,1_32],[68,1_49,1_33,1_66],[1_41,1_49,1_87,1_64],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[2_95,1_48,3_49,1_65],[4_41,1_49,4_92,1_66],[4_97,1_49,5_46,1_64],[64,2_01,1_25,2_18],[10_00,10_00,10_00,10_00]],[[0,0,0,0],[6_62,1_50,7_54,1_66],[6_65,1_99,7_42,2_11],[5_19,2_13,5_54,2_28],[5_19,2_13,5_54,2_28],[1_34,4_33,1_87,4_54],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[3_14,4_69,3_76,4_82],[5_04,6_84,5_82,7_06],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[6_10,7_49,6_52,7_65],[1_30,6_59,1_68,6_72],[1_76,6_57,2_37,6_72],[2_38,6_57,3_12,6_72],[4_43,6_53,6_28,6_72],[4_43,6_53,6_28,6_72],[7_16,3_01,8_25,3_17],[10_00,10_00,10_00,10_00]]] ) # noqa: E231
    # Single-segment token type ids (all zeros).
    SCREAMING_SNAKE_CASE__ : List[str] = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
    # these are sequence labels (i.e. at the token level); -100 = ignore index.
    SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.convert_to_tensor([[-1_00,10,10,10,9,1,-1_00,7,7,-1_00,7,7,4,2,5,2,8,8,-1_00,-1_00,5,0,3,2,-1_00],[-1_00,12,12,12,-1_00,12,10,-1_00,-1_00,-1_00,-1_00,10,12,9,-1_00,-1_00,-1_00,10,10,10,9,12,-1_00,10,-1_00]] ) # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class lowerCamelCase (unittest.TestCase ):
    """Slow integration tests against the ``microsoft/layoutlm-base-uncased`` checkpoint.

    NOTE(review): each test unpacks the fixture batch into a single
    obfuscated name and then passes ``_UpperCAmelCase`` everywhere; the
    originals destructured (input_ids, attention_mask, bbox,
    token_type_ids, labels) and forwarded them by name — TODO confirm.
    """

    @slow
    def A_ ( self : Optional[int] ) -> Optional[int]:
        """Base model: check hidden-state and pooled-output slices against reference values."""
        SCREAMING_SNAKE_CASE__ : List[Any] = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" )
        SCREAMING_SNAKE_CASE__ : List[Any] = prepare_layoutlm_batch_inputs()
        # forward pass
        SCREAMING_SNAKE_CASE__ : int = model(input_ids=_UpperCAmelCase, bbox=_UpperCAmelCase, attention_mask=_UpperCAmelCase, token_type_ids=_UpperCAmelCase )
        # test the sequence output on [0, :3, :3]
        SCREAMING_SNAKE_CASE__ : List[Any] = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]], )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], _UpperCAmelCase, atol=1E-3 ) )
        # test the pooled output on [1, :3]
        SCREAMING_SNAKE_CASE__ : List[Any] = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] )
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], _UpperCAmelCase, atol=1E-3 ) )

    @slow
    def A_ ( self : List[str] ) -> Optional[Any]:
        """Sequence classification: loss is a (2,) vector and logits are (2, 2)."""
        # initialize model with randomly initialized sequence classification head
        SCREAMING_SNAKE_CASE__ : List[Any] = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2 )
        SCREAMING_SNAKE_CASE__ : int = prepare_layoutlm_batch_inputs()
        # forward pass
        SCREAMING_SNAKE_CASE__ : str = model(
            input_ids=_UpperCAmelCase, bbox=_UpperCAmelCase, attention_mask=_UpperCAmelCase, token_type_ids=_UpperCAmelCase, labels=tf.convert_to_tensor([1, 1] ), )
        # test whether we get a loss as a scalar
        SCREAMING_SNAKE_CASE__ : str = outputs.loss
        SCREAMING_SNAKE_CASE__ : Any = (2,)
        self.assertEqual(loss.shape, _UpperCAmelCase )
        # test the shape of the logits
        SCREAMING_SNAKE_CASE__ : Optional[Any] = outputs.logits
        SCREAMING_SNAKE_CASE__ : Tuple = (2, 2)
        self.assertEqual(logits.shape, _UpperCAmelCase )

    @slow
    def A_ ( self : int ) -> List[Any]:
        """Token classification: logits are (2, 25, 13) for the FUNSD-style 13-label head."""
        SCREAMING_SNAKE_CASE__ : Dict = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=1_3 )
        SCREAMING_SNAKE_CASE__ : Any = prepare_layoutlm_batch_inputs()
        # forward pass
        SCREAMING_SNAKE_CASE__ : Optional[int] = model(
            input_ids=_UpperCAmelCase, bbox=_UpperCAmelCase, attention_mask=_UpperCAmelCase, token_type_ids=_UpperCAmelCase, labels=_UpperCAmelCase )
        # test the shape of the logits
        SCREAMING_SNAKE_CASE__ : Tuple = outputs.logits
        SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.convert_to_tensor((2, 2_5, 1_3) )
        self.assertEqual(logits.shape, _UpperCAmelCase )

    @slow
    def A_ ( self : Union[str, Any] ) -> List[str]:
        """Question answering: start/end logits are each (2, 25)."""
        SCREAMING_SNAKE_CASE__ : Tuple = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" )
        SCREAMING_SNAKE_CASE__ : Dict = prepare_layoutlm_batch_inputs()
        # forward pass
        SCREAMING_SNAKE_CASE__ : Dict = model(input_ids=_UpperCAmelCase, bbox=_UpperCAmelCase, attention_mask=_UpperCAmelCase, token_type_ids=_UpperCAmelCase )
        # test the shape of the logits
        SCREAMING_SNAKE_CASE__ : List[str] = tf.convert_to_tensor((2, 2_5) )
        self.assertEqual(outputs.start_logits.shape, _UpperCAmelCase )
        self.assertEqual(outputs.end_logits.shape, _UpperCAmelCase )
| 714 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    """Builds a small LayoutLMv3 config plus dummy multimodal (text + image) inputs
    for the model tests below. Instantiated by the test class as
    ``LayoutLMvaModelTester(self)``."""

    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        """Return a config and a full set of dummy inputs/labels."""
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal: each box must satisfy x0 <= x1 and y0 <= y1,
        # so swap coordinate pairs that came out in the wrong order.
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Run the base model with several input combinations and check output shapes."""
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()
        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )
        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test-suite entry point for LayoutLMv3 models."""

    # NOTE(review): these flag names were lost in the source; reconstructed from
    # ModelTesterMixin conventions — confirm against the mixin.
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True

    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Expand inputs for multiple-choice models and synthesize labels per head type."""
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for embedding_type in ["absolute", "relative_key", "relative_key_query"]:
            # NOTE(review): the original assignment target was lost; setting the
            # config's position_embedding_type matches the looped values — confirm.
            config_and_inputs[0].position_embedding_type = embedding_type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO fixture image used by the slow integration test below.

    Renamed from the obfuscated `_a`: the integration test calls `prepare_img()`.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the released microsoft/layoutlmv3-base checkpoint."""

    @cached_property
    def default_image_processor(self):
        # NOTE(review): apply_ocr value was obfuscated in the source; False matches
        # the hand-built input_ids/bbox below — confirm.
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)
        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 157 | 0 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    """Builds a small MegatronBertConfig plus dummy inputs for the tests below.

    Method names match the calls made by ``MegatronBertModelTest``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return a config and the full set of dummy inputs/labels."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            # NOTE(review): value was obfuscated; encoder-style tests assume False — confirm.
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Repeat each input along a new "choices" dimension.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test-suite entry point for Megatron-BERT models."""

    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the two flag names below were lost in the source; reconstructed
    # from ModelTesterMixin conventions — confirm.
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add pre-training labels on top of the mixin's default preparation."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            # Pre-training needs both MLM labels and a next-sentence label.
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    """Build a ``torch.long`` tensor on the test device from a nested token list.

    Renamed from the obfuscated ``lowerCamelCase_``: the integration test below
    calls ``_long_tensor(...)``. The original passed the token list as ``device``
    as well, which would crash; ``torch_device`` (imported at module top) is used.
    """
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


# Float-comparison tolerance for the integration test below.
# NOTE(review): constant name reconstructed (original was lost) — confirm callers.
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    """Slow integration test against the released Megatron-BERT 345M checkpoint."""

    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        # The checkpoint is not hosted on the Hub; a local copy may be pointed
        # to via the $MYDIR environment variable.
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        # Spot-check the top-left 3x3 corner of the hidden states.
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        tolerance = 1e-4  # kept local so this test is self-contained
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=tolerance, abs_tol=tolerance), msg=msg)
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
# Import the ControlNet pipelines only when their backends are installed:
# both `transformers` and PyTorch are required for the torch pipelines.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Backends missing: expose dummy placeholder objects that raise a helpful
    # error message when actually used.
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline

# The Flax pipeline additionally requires JAX/Flax to be available.
if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 592 | 0 |
"""simple docstring"""
import functools
def _snake_case(worda: str, wordb: str) -> int:
    """Return the Levenshtein edit distance between *worda* and *wordb*.

    The distance is the minimum number of single-character insertions,
    deletions and substitutions needed to turn one word into the other.
    Computed top-down with memoization (``functools.cache``).

    The original definition had two parameters with the same name (a
    SyntaxError) and referenced undefined length variables; both fixed here.

    >>> _snake_case("intention", "execution")
    5
    >>> _snake_case("", "abc")
    3
    """
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # First word exhausted: insert all remaining characters of the second word.
        if indexa >= len_worda:
            return len_wordb - indexb
        # Second word exhausted: delete all remaining characters of the first word.
        if indexb >= len_wordb:
            return len_worda - indexa
        # 0 if current letters match, 1 if a substitution would be needed.
        diff = int(worda[indexa] != wordb[indexb])
        return min(
            1 + min_distance(indexa + 1, indexb),  # delete from first word
            1 + min_distance(indexa, indexb + 1),  # insert into first word
            diff + min_distance(indexa + 1, indexb + 1),  # substitute / keep
        )

    return min_distance(0, 0)


# Export the underscore-prefixed name for star imports.
__all__ = ["_snake_case"]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 708 |
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def _snake_case(num: float) -> float:
    """Compute the gamma function Γ(num) by numerical integration.

    Γ(z) = ∫₀^∞ x^(z-1) · e^(-x) dx, evaluated with ``scipy.integrate.quad``.
    For positive integers, Γ(n) = (n-1)!.

    The original file defined two clashing functions both named ``_snake_case``
    with duplicated parameter names (a SyntaxError) and passed the wrong
    arguments to ``quad``; the integrand is split out as ``_integrand`` here.

    Raises:
        ValueError: if ``num`` is not strictly positive (math domain error).
    """
    if num <= 0:
        raise ValueError("math domain error")
    # quad returns (value, estimated_error); only the value is needed.
    return quad(_integrand, 0, inf, args=(num,))[0]


def _integrand(x: float, z: float) -> float:
    """Integrand x^(z-1) * e^(-x) of the gamma function."""
    return math.pow(x, z - 1) * math.exp(-x)


# Export the underscore-prefixed names for star imports.
__all__ = ["_snake_case"]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 256 | 0 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
# Root cache directory for downloaded Hub artifacts (mirrors huggingface_hub).
SCREAMING_SNAKE_CASE__ : str = HUGGINGFACE_HUB_CACHE
# Canonical file names for model configs and weights in the supported formats.
SCREAMING_SNAKE_CASE__ : Tuple = '''config.json'''
SCREAMING_SNAKE_CASE__ : str = '''diffusion_pytorch_model.bin'''
SCREAMING_SNAKE_CASE__ : int = '''diffusion_flax_model.msgpack'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''model.onnx'''
SCREAMING_SNAKE_CASE__ : str = '''diffusion_pytorch_model.safetensors'''
SCREAMING_SNAKE_CASE__ : Tuple = '''weights.pb'''
# Hub endpoint and local cache locations.
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''https://huggingface.co'''
# NOTE(review): `default_cache_path` is never defined in this module — every
# constant above was mangled onto the same `SCREAMING_SNAKE_CASE__` name, each
# assignment rebinding the previous one, so this line raises NameError as-is.
SCREAMING_SNAKE_CASE__ : str = default_cache_path
SCREAMING_SNAKE_CASE__ : int = '''diffusers_modules'''
# Dynamic-module cache; overridable via the HF_MODULES_CACHE env var.
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.getenv('''HF_MODULES_CACHE''', os.path.join(hf_cache_home, '''modules'''))
# Weight-variant suffixes and a layer-name fragment used elsewhere.
SCREAMING_SNAKE_CASE__ : str = ['''fp16''', '''non-ema''']
SCREAMING_SNAKE_CASE__ : List[str] = '''.self_attn'''
| 538 |
'''simple docstring'''
from __future__ import annotations
def a ( UpperCamelCase_ : str , UpperCamelCase_ : list[str] | None = None , UpperCamelCase_ : dict[str, float] | None = None , UpperCamelCase_ : bool = False , ) -> tuple[int, float, str]:
snake_case__ =cipher_alphabet or [chr(UpperCamelCase_ ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
snake_case__ ={
'a': 0.0_8_4_9_7,
'b': 0.0_1_4_9_2,
'c': 0.0_2_2_0_2,
'd': 0.0_4_2_5_3,
'e': 0.1_1_1_6_2,
'f': 0.0_2_2_2_8,
'g': 0.0_2_0_1_5,
'h': 0.0_6_0_9_4,
'i': 0.0_7_5_4_6,
'j': 0.0_0_1_5_3,
'k': 0.0_1_2_9_2,
'l': 0.0_4_0_2_5,
'm': 0.0_2_4_0_6,
'n': 0.0_6_7_4_9,
'o': 0.0_7_5_0_7,
'p': 0.0_1_9_2_9,
'q': 0.0_0_0_9_5,
'r': 0.0_7_5_8_7,
's': 0.0_6_3_2_7,
't': 0.0_9_3_5_6,
'u': 0.0_2_7_5_8,
'v': 0.0_0_9_7_8,
'w': 0.0_2_5_6_0,
'x': 0.0_0_1_5_0,
'y': 0.0_1_9_9_4,
'z': 0.0_0_0_7_7,
}
else:
# Custom frequencies dictionary
snake_case__ =frequencies_dict
if not case_sensitive:
snake_case__ =ciphertext.lower()
# Chi squared statistic values
snake_case__ ={}
# cycle through all of the shifts
for shift in range(len(UpperCamelCase_ ) ):
snake_case__ =''
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
snake_case__ =(alphabet_letters.index(letter.lower() ) - shift) % len(
UpperCamelCase_ )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
snake_case__ =0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
snake_case__ =letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
snake_case__ =decrypted_with_shift.lower().count(UpperCamelCase_ )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
snake_case__ =frequencies[letter] * occurrences
# Complete the chi squared statistic formula
snake_case__ =((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
snake_case__ =decrypted_with_shift.count(UpperCamelCase_ )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
snake_case__ =frequencies[letter] * occurrences
# Complete the chi squared statistic formula
snake_case__ =((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
snake_case__ =(
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(UpperCamelCase_ : int ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
snake_case__ =min(
UpperCamelCase_ , key=UpperCamelCase_ , )
# Get all the data from the most likely cipher (key, decoded message)
(
(
snake_case__
) , (
snake_case__
) ,
) =chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
| 538 | 1 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_A : Optional[int] =logging.get_logger(__name__)
_A : Optional[int] ={
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class _lowercase(PretrainedConfig):
    """Configuration for a TrajectoryTransformer model.

    The original was mangled: every class attribute was bound to ``a``, every
    ``__init__`` parameter shared one duplicate name (a SyntaxError), the base
    class ``a__`` was undefined, and ``super().__init__`` passed undefined
    names. The canonical attribute/parameter names the body reads, and the
    ``PretrainedConfig`` base imported at the top of this file, are restored.
    """

    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0_006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50_256,
        eos_token_id=50_256,
        **kwargs,
    ):
        # Model / trajectory dimensions.
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        # Transformer hyper-parameters.
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
# Config-tester subclass adding the MobileViTV2-specific `width_multiplier` check.
# NOTE(review): the base class was mangled to `_lowercase` (a self-reference to a
# previously defined class); upstream this subclasses ConfigTester — confirm.
class _lowercase ( _lowercase ):
    def lowerCamelCase_ ( self: Any ):
        # Build the config under test from the tester's canned kwargs.
        lowerCamelCase__ : Dict = self.config_class(**self.inputs_dict )
        # NOTE(review): `UpperCamelCase__` is unbound here — the assignment above
        # was mangled; the hasattr check presumably targets the config instance.
        self.parent.assertTrue(hasattr(UpperCamelCase__ , """width_multiplier""" ) )
class _lowercase :
    """Model tester that builds tiny MobileViTV2 configs/inputs and checks outputs.

    NOTE(review): identifiers in this file were mechanically mangled — every
    ``__init__`` parameter below shares the name ``UpperCamelCase__`` (duplicate
    parameter names are a SyntaxError in Python) and the names the bodies read
    (``parent``, ``width_multiplier``, ``config_and_inputs`` …) are never bound.
    The original parameter/variable names must be restored before this module
    can be imported; the code is left byte-identical here.
    """
    def __init__( self: str , UpperCamelCase__: Optional[int] , UpperCamelCase__: str=13 , UpperCamelCase__: Any=64 , UpperCamelCase__: Optional[Any]=2 , UpperCamelCase__: str=3 , UpperCamelCase__: List[str]="swish" , UpperCamelCase__: Any=3 , UpperCamelCase__: Optional[int]=32 , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: int=0.02 , UpperCamelCase__: Dict=True , UpperCamelCase__: Dict=True , UpperCamelCase__: Any=10 , UpperCamelCase__: int=None , UpperCamelCase__: List[Any]=0.25 , UpperCamelCase__: str=0.0 , UpperCamelCase__: Optional[int]=0.0 , ):
        # Record the test hyper-parameters; the hidden size is derived from the
        # width multiplier the same way the real model computes it.
        lowerCamelCase__ : Any = parent
        lowerCamelCase__ : Optional[Any] = batch_size
        lowerCamelCase__ : Optional[int] = image_size
        lowerCamelCase__ : str = patch_size
        lowerCamelCase__ : Optional[int] = num_channels
        lowerCamelCase__ : Optional[Any] = make_divisible(512 * width_multiplier , divisor=8 )
        lowerCamelCase__ : List[str] = hidden_act
        lowerCamelCase__ : Any = conv_kernel_size
        lowerCamelCase__ : Any = output_stride
        lowerCamelCase__ : Union[str, Any] = classifier_dropout_prob
        lowerCamelCase__ : List[str] = use_labels
        lowerCamelCase__ : Optional[Any] = is_training
        lowerCamelCase__ : List[str] = num_labels
        lowerCamelCase__ : Dict = initializer_range
        lowerCamelCase__ : List[Any] = scope
        lowerCamelCase__ : Tuple = width_multiplier
        lowerCamelCase__ : List[Any] = ffn_dropout
        lowerCamelCase__ : Any = attn_dropout
    # Build random pixel values plus (optional) classification/segmentation labels.
    def lowerCamelCase_ ( self: Dict ):
        lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase__ : Tuple = None
        lowerCamelCase__ : Optional[Any] = None
        if self.use_labels:
            lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
            lowerCamelCase__ : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        lowerCamelCase__ : Union[str, Any] = self.get_config()
        return config, pixel_values, labels, pixel_labels
    # Assemble a MobileViTV2 config from the recorded hyper-parameters.
    def lowerCamelCase_ ( self: List[Any] ):
        return MobileViTVaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
    # Forward the base model and check the last hidden state's shape.
    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: int , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] ):
        lowerCamelCase__ : Union[str, Any] = MobileViTVaModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : str = model(UpperCamelCase__ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
    # Forward the image-classification head and check the logits' shape.
    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Tuple ):
        lowerCamelCase__ : Tuple = self.num_labels
        lowerCamelCase__ : Dict = MobileViTVaForImageClassification(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : int = model(UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    # Forward the semantic-segmentation head with and without labels.
    def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Any , UpperCamelCase__: Optional[Any] , UpperCamelCase__: str ):
        lowerCamelCase__ : List[str] = self.num_labels
        lowerCamelCase__ : Union[str, Any] = MobileViTVaForSemanticSegmentation(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : Tuple = model(UpperCamelCase__ )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
    # Repackage (config, inputs) as the common-test (config, inputs_dict) pair.
    def lowerCamelCase_ ( self: Tuple ):
        lowerCamelCase__ : Any = self.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = config_and_inputs
        lowerCamelCase__ : Optional[Any] = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
    """Common/pipeline test suite for MobileViTV2.

    NOTE(review): class attributes were all mangled onto the single name ``a``
    (so only the last assignment survives at runtime), the method names were
    collapsed onto ``lowerCamelCase_``, and several referenced names
    (``MobileViTVaModelTester``, ``MobileViTVaConfigTester``, nested helper
    parameters with duplicate names) are unbound. Left byte-identical.
    """
    a = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    a = (
        {
            """feature-extraction""": MobileViTVaModel,
            """image-classification""": MobileViTVaForImageClassification,
            """image-segmentation""": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    a = False
    a = False
    a = False
    a = False
    # Build the model and config testers used by the shared test machinery.
    def lowerCamelCase_ ( self: Optional[int] ):
        lowerCamelCase__ : Tuple = MobileViTVaModelTester(self )
        lowerCamelCase__ : List[str] = MobileViTVaConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
    def lowerCamelCase_ ( self: Tuple ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
    def lowerCamelCase_ ( self: int ):
        pass
    @unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
    def lowerCamelCase_ ( self: List[str] ):
        pass
    @unittest.skip(reason="""MobileViTV2 does not output attentions""" )
    def lowerCamelCase_ ( self: Union[str, Any] ):
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
    def lowerCamelCase_ ( self: int ):
        pass
    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def lowerCamelCase_ ( self: Tuple ):
        pass
    # Check the forward signature starts with `pixel_values`.
    def lowerCamelCase_ ( self: Tuple ):
        lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : int = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ : Tuple = [*signature.parameters.keys()]
            lowerCamelCase__ : str = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
    def lowerCamelCase_ ( self: str ):
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )
    # Verify hidden-state count and the successive-halving of spatial dims.
    def lowerCamelCase_ ( self: List[str] ):
        def check_hidden_states_output(UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] ):
            lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                lowerCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            lowerCamelCase__ : Optional[int] = outputs.hidden_states
            lowerCamelCase__ : List[Any] = 5
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            lowerCamelCase__ : int = 2
            for i in range(len(UpperCamelCase__ ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )
        lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : int = True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase__ : str = True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
    def lowerCamelCase_ ( self: Dict ):
        lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase__ )
    # Smoke-test loading the first pretrained checkpoint from the Hub.
    @slow
    def lowerCamelCase_ ( self: Union[str, Any] ):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : Union[str, Any] = MobileViTVaModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
def prepare_img():
    """Load the COCO fixture image used by the integration tests below.

    The original bound the opened image to a mangled name and returned the
    undefined name ``image``; it was also named ``SCREAMING_SNAKE_CASE_``
    while every caller in this file invokes ``prepare_img()`` — both fixed.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
    """Slow integration tests against pretrained MobileViTV2 checkpoints.

    NOTE(review): method names were all mangled onto ``lowerCamelCase_`` (only
    the last definition survives) and several read names are unbound
    (e.g. ``prepare_img``, ``outputs``); left byte-identical.
    """
    # Image processor for the classification checkpoint (None without vision deps).
    @cached_property
    def lowerCamelCase_ ( self: Tuple ):
        return (
            MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
            if is_vision_available()
            else None
        )
    # ImageNet classification: check logits shape and a slice of values.
    @slow
    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : Optional[Any] = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to(
            UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = self.default_image_processor
        lowerCamelCase__ : List[Any] = prepare_img()
        lowerCamelCase__ : Any = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : int = model(**UpperCamelCase__ )
        # verify the logits
        lowerCamelCase__ : str = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
        lowerCamelCase__ : Optional[int] = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ).to(UpperCamelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
    # PASCAL VOC segmentation: check logits shape and a 3x3x3 slice of values.
    @slow
    def lowerCamelCase_ ( self: Union[str, Any] ):
        lowerCamelCase__ : int = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : Optional[Any] = model.to(UpperCamelCase__ )
        lowerCamelCase__ : Any = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : Union[str, Any] = prepare_img()
        lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : Optional[Any] = model(**UpperCamelCase__ )
        lowerCamelCase__ : str = outputs.logits
        # verify the logits
        lowerCamelCase__ : List[str] = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , UpperCamelCase__ )
        lowerCamelCase__ : Any = torch.tensor(
            [
                [[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]],
                [[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]],
                [[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]],
            ] , device=UpperCamelCase__ , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
    # Post-processing: check resized and native-resolution segmentation maps.
    @slow
    def lowerCamelCase_ ( self: Optional[int] ):
        lowerCamelCase__ : Optional[Any] = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : List[Any] = model.to(UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : Optional[Any] = prepare_img()
        lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : Dict = model(**UpperCamelCase__ )
        lowerCamelCase__ : List[str] = outputs.logits.detach().cpu()
        lowerCamelCase__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(50, 60)] )
        lowerCamelCase__ : int = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ )
        lowerCamelCase__ : int = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
| 631 | 0 |
from __future__ import annotations
class BoyerMooreSearch:
    """Boyer-Moore pattern search using the bad-character heuristic.

    The original was mangled: the class was named ``a`` while the driver below
    constructs ``BoyerMooreSearch``, all three methods shared one name, and
    ``__init__`` had duplicate parameter names (a SyntaxError). The names the
    driver and method bodies actually read are restored.
    """

    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of *char* in the pattern, or -1."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch when the pattern is
        aligned at *current_pos*, or -1 if the pattern matches there."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list:
        """Return all start positions where the pattern occurs in the text."""
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
| 63 |
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def parse_args():
    """Parse command-line options for the Stable Diffusion generation script.

    The original was named ``_SCREAMING_SNAKE_CASE`` while the script below
    calls ``parse_args()``, and every ``type=`` argument referenced an
    undefined mangled name; the intended names/types are restored.

    Returns:
        argparse.Namespace with pretrained_model_name_or_path, caption,
        images_num, seed and cuda_id.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-m', '--pretrained_model_name_or_path', type=str, default=None, required=True,
        help='Path to pretrained model or model identifier from huggingface.co/models.',
    )
    parser.add_argument(
        '-c', '--caption', type=str, default='robotic cat with wings',
        help='Text used to generate images.',
    )
    parser.add_argument(
        '-n', '--images_num', type=int, default=4,
        help='How much images to generate.',
    )
    parser.add_argument(
        '-s', '--seed', type=int, default=42,
        help='Seed for random process.',
    )
    parser.add_argument(
        '-ci', '--cuda_id', type=int, default=0,
        help='cuda_id.',
    )
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols):
    """Paste ``rows * cols`` equally sized PIL images into one grid image.

    The original had three parameters mangled to one duplicate name (a
    SyntaxError) and was named ``_SCREAMING_SNAKE_CASE`` while
    ``generate_images`` calls ``image_grid``; both are fixed.

    Raises:
        ValueError: if ``len(imgs)`` does not equal ``rows * cols``.
    """
    if not len(imgs) == rows * cols:
        raise ValueError('The specified number of rows and columns are not correct.')
    # All images are assumed to share the size of the first one.
    w, h = imgs[0].size
    grid = Image.new('RGB', size=(cols * w, rows * h))
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs):
        # Row-major placement: column = i % cols, row = i // cols.
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def generate_images(pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42):
    """Run the diffusion pipeline and assemble the outputs into a grid.

    The original had mangled duplicate parameter names (a SyntaxError) and was
    named ``_SCREAMING_SNAKE_CASE`` while the script calls
    ``generate_images``; the intended names are restored.

    Returns:
        (grid, images): the pasted grid image and the list of PIL images.
    """
    # Seed a device-local generator for reproducible sampling.
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    # Arrange the images into a roughly square grid.
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''')
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''')
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''')
unet = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''')
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# NOTE(review): the mangled original bound this lambda to a throwaway name;
# upstream it disables the safety checker on the pipeline — confirm.
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')):
    # Restore the quantized/compressed UNet produced by neural-compressor.
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, '''unet''', unet)
else:
    unet = unet.to(torch.device('''cuda''', args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
# Save the grid plus one file per image, named after the caption.
grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
| 4 | 0 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class UpperCamelCase_ :
    """Abstract base for generation streamers.

    Subclasses receive token ids via ``put`` and are told the stream has
    finished via ``end``. The original had both methods mangled to the same
    name; the canonical streamer method names are restored here.
    """

    def put(self, value):
        """Deliver new token ids to the streamer (must be overridden)."""
        raise NotImplementedError()

    def end(self):
        """Signal that generation has finished (must be overridden)."""
        raise NotImplementedError()
# NOTE(review): `A_` is undefined in this file — presumably the mangled name of
# the base streamer class defined above; confirm before running.
class UpperCamelCase_(A_):
    """Streamer that prints decoded text to stdout as soon as whole words form.

    The original had all methods mangled onto one name while the bodies call
    ``self.on_finalized_text`` and ``self._is_chinese_char``; the method names
    the code reads are restored here.
    """

    def __init__(self, tokenizer, skip_prompt=False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs
        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """Receive new token ids, decode the cache, emit newly finalized text."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        # Optionally swallow the first chunk (the prompt tokens).
        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        """Flush any remaining cached text and mark the stream as finished."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = """"""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text, stream_end=False):
        """Print finalized text; subclasses override this to redirect output."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Return True if code point *cp* lies in a CJK ideograph block."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True
        return False
# NOTE(review): `A_` is undefined in this file — presumably the mangled name of
# the printing streamer class defined above; confirm before running.
class UpperCamelCase_(A_):
    """Streamer that queues finalized text, usable as a blocking iterator.

    The original had its methods mangled onto one name while the bodies read
    ``self.text_queue``, ``self.stop_signal`` and ``self.timeout``; the method
    names the code requires (``on_finalized_text``, ``__next__``) are restored.
    """

    def __init__(self, tokenizer, skip_prompt=False, timeout=None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        # Sentinel pushed on the queue when the stream ends.
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text, stream_end=False):
        """Put finalized text on the queue; push the stop sentinel at the end."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
| 708 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase_ ( UpperCamelCase , unittest.TestCase):
    """Fast pipeline tests for DiT (class-conditioned image generation).

    NOTE(review): class attributes were all mangled onto ``snake_case__`` (only
    the last assignment is visible at runtime) and method names onto
    ``UpperCAmelCase_``; the mixin base ``UpperCamelCase`` is also a mangled
    name. Code left byte-identical.
    """
    snake_case__ : Union[str, Any] = DiTPipeline
    snake_case__ : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    snake_case__ : Any = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    snake_case__ : Optional[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    snake_case__ : Optional[Any] = False
    # Build a tiny transformer + VAE + scheduler component set for fast tests.
    def UpperCAmelCase_ ( self : Dict ) -> Dict:
        torch.manual_seed(0 )
        __SCREAMING_SNAKE_CASE = TransformeraDModel(
            sample_size=1_6 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=UpperCAmelCase__ , activation_fn="gelu-approximate" , num_embeds_ada_norm=1_0_0_0 , norm_type="ada_norm_zero" , norm_elementwise_affine=UpperCAmelCase__ , )
        __SCREAMING_SNAKE_CASE = AutoencoderKL()
        __SCREAMING_SNAKE_CASE = DDIMScheduler()
        __SCREAMING_SNAKE_CASE = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components
    # Deterministic pipeline inputs, with an mps-safe generator fallback.
    def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str=0 ) -> Optional[int]:
        if str(UpperCAmelCase__ ).startswith("mps" ):
            __SCREAMING_SNAKE_CASE = torch.manual_seed(UpperCAmelCase__ )
        else:
            __SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
        __SCREAMING_SNAKE_CASE = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    # Run a 2-step inference and compare an output slice to golden values.
    def UpperCAmelCase_ ( self : Optional[int] ) -> List[str]:
        __SCREAMING_SNAKE_CASE = "cpu"
        __SCREAMING_SNAKE_CASE = self.get_dummy_components()
        __SCREAMING_SNAKE_CASE = self.pipeline_class(**UpperCAmelCase__ )
        pipe.to(UpperCAmelCase__ )
        pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
        __SCREAMING_SNAKE_CASE = self.get_dummy_inputs(UpperCAmelCase__ )
        __SCREAMING_SNAKE_CASE = pipe(**UpperCAmelCase__ ).images
        __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 1_6, 1_6, 3) )
        __SCREAMING_SNAKE_CASE = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
        __SCREAMING_SNAKE_CASE = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(UpperCAmelCase__ , 1E-3 )
    def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
        self._test_inference_batch_single_identical(relax_max_difference=UpperCAmelCase__ , expected_max_diff=1E-3 )
    # xFormers attention parity check (requires CUDA + xformers).
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def UpperCAmelCase_ ( self : int ) -> Dict:
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class UpperCamelCase_ ( unittest.TestCase):
    """Slow GPU integration tests for pretrained DiT checkpoints.

    NOTE(review): same machine-mangling as the fast test class above — all
    three methods share the name ``UpperCAmelCase_`` (only the last definition
    survives, and the tearDown-style cleanup in the first is never
    registered), and locals collapsed to ``__SCREAMING_SNAKE_CASE`` leave
    later reads of ``pipe``, ``words``, ``class_ids``, ``generator`` and the
    images unresolved.  Confirm against the original diffusers test module.
    Mangled annotations referencing unimported names have been dropped.
    """

    def UpperCAmelCase_ ( self ):
        # Presumably meant to be tearDown(): free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCAmelCase_ ( self ):
        """256px DiT-XL/2: generated images must match stored references."""
        __SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
        __SCREAMING_SNAKE_CASE = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
        pipe.to("cuda" )
        __SCREAMING_SNAKE_CASE = ["vase", "umbrella", "white shark", "white wolf"]
        __SCREAMING_SNAKE_CASE = pipe.get_label_ids(UpperCAmelCase__ )
        __SCREAMING_SNAKE_CASE = pipe(UpperCAmelCase__ , generator=UpperCAmelCase__ , num_inference_steps=4_0 , output_type="np" ).images
        for word, image in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
            __SCREAMING_SNAKE_CASE = load_numpy(
                F"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
            assert np.abs((expected_image - image).max() ) < 1E-2

    def UpperCAmelCase_ ( self ):
        """512px DiT-XL/2 with DPM-Solver multistep: compare against references."""
        __SCREAMING_SNAKE_CASE = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
        __SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.to("cuda" )
        __SCREAMING_SNAKE_CASE = ["vase", "umbrella"]
        __SCREAMING_SNAKE_CASE = pipe.get_label_ids(UpperCAmelCase__ )
        __SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
        __SCREAMING_SNAKE_CASE = pipe(UpperCAmelCase__ , generator=UpperCAmelCase__ , num_inference_steps=2_5 , output_type="np" ).images
        for word, image in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
            __SCREAMING_SNAKE_CASE = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                F"""/dit/{word}_512.npy""" )
            assert np.abs((expected_image - image).max() ) < 1E-1
| 553 | 0 |
"""simple docstring"""
__A = 9.8_0665
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = g ) ->float:
"""simple docstring"""
if fluid_density <= 0:
raise ValueError('Impossible fluid density' )
if volume < 0:
raise ValueError('Impossible Object volume' )
if gravity <= 0:
raise ValueError('Impossible Gravity' )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 93 |
'''simple docstring'''
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    """Derive ``(encoder_config, decoder_config)`` for the HF
    VisionEncoderDecoder version of a Donut checkpoint.

    NOTE(review): in the mangled original this helper was named
    ``SCREAMING_SNAKE_CASE`` (clashing with the other helpers) while the
    conversion routine calls ``get_configs``; its parameter was the unused
    ``lowercase_`` while the body read the undefined ``model``; and the
    boolean MBart flags had been replaced by that undefined name.  Restored
    to the upstream conversion script's values — verify against it.

    Args:
        model: the original ``DonutModel`` (its ``config`` and decoder
            tokenizer are read).

    Returns:
        Tuple of ``(DonutSwinConfig, MBartConfig)``.
    """
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        # Decoder vocab must cover the (possibly extended) Donut tokenizer.
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config
def rename_key(name):
    """Map an original Donut state-dict key to its HF Transformers name.

    Restored from the mangled original, in which the function was named
    ``SCREAMING_SNAKE_CASE`` (shadowed by the other helpers) and each
    replacement was assigned to a throwaway variable while the untouched
    ``name`` was re-read — so no rename ever took effect.  The replacements
    are now chained on ``name`` itself, in the original order (order matters:
    e.g. ``encoder.model`` must be stripped before the ``startswith``
    checks).

    Args:
        name: a key from the original ``DonutModel`` state dict.

    Returns:
        The corresponding key for the HF VisionEncoderDecoder state dict.
    """
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            # HF nests the Swin stages one level deeper.
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        # Plain "attn" maps to self-attention, but attn_mask buffers are
        # handled (dropped) by the state-dict converter, not renamed here.
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")
        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"
    return name
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ):
    """Convert the original Donut state dict to HF Transformers naming.

    NOTE(review): this file is machine-mangled and this function is broken as
    written — confirm each point against the upstream Donut conversion
    script:
      * the parameter name ``lowercase_`` is declared twice (SyntaxError);
        upstream the parameters are ``(orig_state_dict, model)``;
      * every local is collapsed to ``lowercase``, so the qkv split below
        computes slices and immediately discards them instead of writing
        query/key/value entries back into ``orig_state_dict``;
      * the final ``else`` branch should store ``val`` under the renamed key
        (via the key-renaming helper above), not rebind a local;
      * mangled typing annotations have been dropped from the signature.
    """
    for key in orig_state_dict.copy().keys():
        # Pop so renamed/split entries replace the original key.
        lowercase = orig_state_dict.pop(lowercase_ )
        if "qkv" in key:
            # Fused qkv projection: split into query/key/value by head size.
            lowercase = key.split(""".""" )
            lowercase = int(key_split[3] )
            lowercase = int(key_split[5] )
            lowercase = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                lowercase = val[:dim, :]
                lowercase = val[dim : dim * 2, :]
                lowercase = val[-dim:, :]
            else:
                lowercase = val[:dim]
                lowercase = val[dim : dim * 2]
                lowercase = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            lowercase = val
    return orig_state_dict
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=None , lowercase_=False ):
    """Convert an original Donut checkpoint to a HF VisionEncoderDecoderModel,
    verify it against the original on a sample document, then optionally save
    and push it.

    NOTE(review): machine-mangled — confirm against the upstream Donut
    conversion script:
      * the parameter name ``lowercase_`` is declared three times
        (SyntaxError); upstream: ``(model_name, pytorch_dump_folder_path=None,
        push_to_hub=False)``;
      * every local is collapsed to ``lowercase``, so later reads of
        ``model``, ``original_model``, ``processor``, ``pixel_values`` etc.
        cannot resolve;
      * ``get_configs`` / ``convert_state_dict`` are called but the helpers
        above are all (mis)named ``SCREAMING_SNAKE_CASE``;
      * mangled typing annotations have been dropped from the signature.
    """
    # load original model
    lowercase = DonutModel.from_pretrained(lowercase_ ).eval()

    # load HuggingFace model
    lowercase , lowercase = get_configs(lowercase_ )
    lowercase = DonutSwinModel(lowercase_ )
    lowercase = MBartForCausalLM(lowercase_ )
    lowercase = VisionEncoderDecoderModel(encoder=lowercase_ , decoder=lowercase_ )
    model.eval()

    lowercase = original_model.state_dict()
    lowercase = convert_state_dict(lowercase_ , lowercase_ )
    model.load_state_dict(lowercase_ )

    # verify results on scanned document
    lowercase = load_dataset("""hf-internal-testing/example-documents""" )
    lowercase = dataset["""test"""][0]["""image"""].convert("""RGB""" )

    lowercase = XLMRobertaTokenizerFast.from_pretrained(lowercase_ , from_slow=lowercase_ )
    lowercase = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
    lowercase = DonutProcessor(lowercase_ , lowercase_ )
    lowercase = processor(lowercase_ , return_tensors="""pt""" ).pixel_values

    # Each fine-tuned checkpoint expects its own task prompt.
    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        lowercase = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
        lowercase = """When is the coffee break?"""
        lowercase = task_prompt.replace("""{user_input}""" , lowercase_ )
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        lowercase = """<s_rvlcdip>"""
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        lowercase = """<s_cord>"""
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        # NOTE(review): upstream this literal is "<s_cord-v2>"; the leading
        # "<" is missing here — kept byte-identical, flagged only.
        lowercase = """s_cord-v2>"""
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        lowercase = """<s_zhtrainticket>"""
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        lowercase = """hello world"""
    else:
        raise ValueError("""Model name not supported""" )
    lowercase = original_model.decoder.tokenizer(lowercase_ , add_special_tokens=lowercase_ , return_tensors="""pt""" )[
        """input_ids"""
    ]

    # verify patch embeddings
    lowercase = original_model.encoder.model.patch_embed(lowercase_ )
    lowercase , lowercase = model.encoder.embeddings(lowercase_ )
    assert torch.allclose(lowercase_ , lowercase_ , atol=1E-3 )

    # verify encoder hidden states
    lowercase = original_model.encoder(lowercase_ )
    lowercase = model.encoder(lowercase_ ).last_hidden_state
    assert torch.allclose(lowercase_ , lowercase_ , atol=1E-2 )

    # verify decoder hidden states
    lowercase = original_model(lowercase_ , lowercase_ , lowercase_ ).logits
    lowercase = model(lowercase_ , decoder_input_ids=lowercase_ ).logits
    assert torch.allclose(lowercase_ , lowercase_ , atol=1E-3 )
    print("""Looks ok!""" )

    if pytorch_dump_folder_path is not None:
        print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
        model.save_pretrained(lowercase_ )
        processor.save_pretrained(lowercase_ )

    if push_to_hub:
        model.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
        processor.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
if __name__ == "__main__":
lowercase_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''naver-clova-ix/donut-base-finetuned-docvqa''',
required=False,
type=str,
help='''Name of the original model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
required=False,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub.''',
)
lowercase_ : Dict = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 588 | 0 |
"""simple docstring"""
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__a = "▁"
__a = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class lowerCamelCase ( _lowerCAmelCase , unittest.TestCase ):
    """Tokenizer test-suite for ``BertGenerationTokenizer``.

    NOTE(review): machine-mangled file — confirm against the upstream
    transformers test module:
      * the base name ``_lowerCAmelCase`` is undefined (presumably the shared
        ``TokenizerTesterMixin``);
      * the three class attributes are all bound to ``_A`` (only the last,
        ``True``, survives — the tokenizer class / flags the mixin expects
        are lost);
      * every test method is named ``lowerCAmelCase_``, so each definition
        shadows the previous one;
      * constructor/helper arguments were replaced by the undefined name
        ``snake_case`` (upstream these are e.g. ``SAMPLE_VOCAB``,
        ``keep_accents=True``, and the values under test), and several locals
        are bound to ``snake_case_`` while later lines read the intended
        names (``tokenizer``, ``vocab_keys``, ``model`` ...);
      * class-attribute/signature annotations that referenced unimported
        typing names have been dropped (local ``:Type =`` annotations are
        unevaluated at runtime and are left as-is).
    """

    _A = BertGenerationTokenizer  # intended: tokenizer_class
    _A = False                    # intended: test_rust_tokenizer
    _A = True                     # intended: test_sentencepiece

    def lowerCAmelCase_ ( self ):
        # Presumably setUp(): build a sentencepiece fixture tokenizer and
        # persist it so the mixin can reload it from ``self.tmpdirname``.
        super().setUp()
        snake_case_ :str = BertGenerationTokenizer(snake_case , keep_accents=snake_case )
        tokenizer.save_pretrained(self.tmpdirname )

    def lowerCAmelCase_ ( self ):
        """``<s>`` must round-trip through id 1."""
        snake_case_ :Any = """<s>"""
        snake_case_ :Optional[int] = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case ) , snake_case )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case ) , snake_case )

    def lowerCAmelCase_ ( self ):
        """Vocab ordering and size of the fixture tokenizer."""
        snake_case_ :Tuple = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , """<unk>""" )
        self.assertEqual(vocab_keys[1] , """<s>""" )
        self.assertEqual(vocab_keys[-1] , """<pad>""" )
        self.assertEqual(len(snake_case ) , 1_002 )

    def lowerCAmelCase_ ( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )

    def lowerCAmelCase_ ( self ):
        """Full tokenization round-trip, including unknown-token handling."""
        snake_case_ :Any = BertGenerationTokenizer(snake_case , keep_accents=snake_case )

        snake_case_ :Optional[int] = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(snake_case , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(snake_case ) , [285, 46, 10, 170, 382] , )

        snake_case_ :Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            snake_case , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """9""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """é""",
                """.""",
            ] , )
        snake_case_ :Dict = tokenizer.convert_tokens_to_ids(snake_case )
        self.assertListEqual(
            snake_case , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )

        snake_case_ :Dict = tokenizer.convert_ids_to_tokens(snake_case )
        self.assertListEqual(
            snake_case , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """<unk>""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """<unk>""",
                """.""",
            ] , )

    @cached_property
    def lowerCAmelCase_ ( self ):
        # Presumably ``big_tokenizer``: the real pretrained checkpoint.
        return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )

    @slow
    def lowerCAmelCase_ ( self ):
        """Short-sentence encoding against the pretrained checkpoint."""
        snake_case_ :Optional[Any] = """Hello World!"""
        snake_case_ :Union[str, Any] = [18_536, 2_260, 101]

        self.assertListEqual(snake_case , self.big_tokenizer.encode(snake_case ) )

    @slow
    def lowerCAmelCase_ ( self ):
        """Long, punctuation- and <unk>-heavy sentence encoding."""
        snake_case_ :Dict = (
            """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
            """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
        )
        snake_case_ :List[str] = [
            871,
            419,
            358,
            946,
            991,
            2_521,
            452,
            358,
            1_357,
            387,
            7_751,
            3_536,
            112,
            985,
            456,
            126,
            865,
            938,
            5_400,
            5_734,
            458,
            1_368,
            467,
            786,
            2_462,
            5_246,
            1_159,
            633,
            865,
            4_519,
            457,
            582,
            852,
            2_557,
            427,
            916,
            508,
            405,
            34_324,
            497,
            391,
            408,
            11_342,
            1_244,
            385,
            100,
            938,
            985,
            456,
            574,
            362,
            12_597,
            3_200,
            3_129,
            1_172,
        ]

        self.assertListEqual(snake_case , self.big_tokenizer.encode(snake_case ) )

    @require_torch
    @slow
    def lowerCAmelCase_ ( self ):
        """Encoded ids must be consumable by a BertGenerationEncoder."""
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        snake_case_ :List[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10]
        snake_case_ :List[str] = """ """.join(snake_case )
        snake_case_ :int = self.big_tokenizer.encode_plus(snake_case , return_tensors="""pt""" , return_token_type_ids=snake_case )
        snake_case_ :List[Any] = self.big_tokenizer.batch_encode_plus(
            [sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=snake_case )

        snake_case_ :Optional[Any] = BertGenerationConfig()
        snake_case_ :Dict = BertGenerationEncoder(snake_case )

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**snake_case )
            model(**snake_case )

    @slow
    def lowerCAmelCase_ ( self ):
        """Pinned integration encoding for a fixed model revision."""
        # fmt: off
        snake_case_ :Optional[Any] = {"""input_ids""": [[39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114], [448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=snake_case , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
| 310 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy import structure: maps submodule name -> list of public symbols.
# NOTE(review): the mangled original bound every piece to ``__a`` (losing the
# dict updates) yet passed the undefined name ``_import_structure`` to
# ``_LazyModule`` — restored to the canonical transformers lazy-module
# pattern; confirm against the upstream vivit ``__init__``.
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Vision extras missing: expose no image processor.
    pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: expose no modeling classes.
    pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports
    # submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 310 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
    """Tests for ``BlipProcessor`` (tokenizer + image processor wrapper).

    NOTE(review): machine-mangled — confirm against the upstream transformers
    test module:
      * every method is named ``A``, so each definition shadows the previous
        one and only the last survives;
      * call arguments were replaced by the undefined name
        ``lowerCAmelCase_`` (upstream these are the locals created on the
        preceding lines, e.g. ``tokenizer``, ``image_processor``, the input
        text/images);
      * mangled signature annotations referencing unimported typing names
        have been dropped.
    """

    def A ( self ):
        # Presumably setUp(): persist a processor in a temp dir for reload tests.
        __UpperCamelCase = tempfile.mkdtemp()

        __UpperCamelCase = BlipImageProcessor()
        __UpperCamelCase = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )

        __UpperCamelCase = BlipProcessor(lowerCAmelCase_ , lowerCAmelCase_ )

        processor.save_pretrained(self.tmpdirname )

    def A ( self , **A_ ):
        # Helper: reload just the tokenizer from the temp dir.
        return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ).tokenizer

    def A ( self , **A_ ):
        # Helper: reload just the image processor from the temp dir.
        return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ).image_processor

    def A ( self ):
        # Presumably tearDown(): remove the temp dir.
        shutil.rmtree(self.tmpdirname )

    def A ( self ):
        """Helper: one random 30x400 RGB PIL image."""
        __UpperCamelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]

        __UpperCamelCase = [Image.fromarray(np.moveaxis(lowerCAmelCase_ , 0 , -1 ) ) for x in image_inputs]

        return image_inputs

    def A ( self ):
        """Save/reload round-trip must preserve tokenizer and image processor."""
        __UpperCamelCase = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )

        __UpperCamelCase = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        __UpperCamelCase = self.get_image_processor(do_normalize=lowerCAmelCase_ , padding_value=1.0 )

        __UpperCamelCase = BlipProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowerCAmelCase_ , padding_value=1.0 )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , lowerCAmelCase_ )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , lowerCAmelCase_ )

    def A ( self ):
        """Processor image path must match the bare image processor."""
        __UpperCamelCase = self.get_image_processor()
        __UpperCamelCase = self.get_tokenizer()

        __UpperCamelCase = BlipProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )

        __UpperCamelCase = self.prepare_image_inputs()

        __UpperCamelCase = image_processor(lowerCAmelCase_ , return_tensors="np" )
        __UpperCamelCase = processor(images=lowerCAmelCase_ , return_tensors="np" )

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def A ( self ):
        """Processor text path must match the bare tokenizer."""
        __UpperCamelCase = self.get_image_processor()
        __UpperCamelCase = self.get_tokenizer()

        __UpperCamelCase = BlipProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )

        __UpperCamelCase = "lower newer"

        __UpperCamelCase = processor(text=lowerCAmelCase_ )

        __UpperCamelCase = tokenizer(lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def A ( self ):
        """Joint text+image call yields the expected keys; empty call raises."""
        __UpperCamelCase = self.get_image_processor()
        __UpperCamelCase = self.get_tokenizer()

        __UpperCamelCase = BlipProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )

        __UpperCamelCase = "lower newer"
        __UpperCamelCase = self.prepare_image_inputs()

        __UpperCamelCase = processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ )

        self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )

        # test if it raises when no input is passed
        with pytest.raises(lowerCAmelCase_ ):
            processor()

    def A ( self ):
        """batch_decode must delegate to the tokenizer."""
        __UpperCamelCase = self.get_image_processor()
        __UpperCamelCase = self.get_tokenizer()

        __UpperCamelCase = BlipProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )

        __UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        __UpperCamelCase = processor.batch_decode(lowerCAmelCase_ )
        __UpperCamelCase = tokenizer.batch_decode(lowerCAmelCase_ )

        self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )

    def A ( self ):
        """Output keys of the joint call are exactly the supported set."""
        __UpperCamelCase = self.get_image_processor()
        __UpperCamelCase = self.get_tokenizer()

        __UpperCamelCase = BlipProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )

        __UpperCamelCase = "lower newer"
        __UpperCamelCase = self.prepare_image_inputs()

        __UpperCamelCase = processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ )

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def snake_case_ (UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : int , UpperCamelCase : Optional[int] = None , ):
    """Load CSV splits, tokenize them, and build ``tf.data.Dataset`` objects.

    Returns ``(train_ds, val_ds, test_ds, label2id)``.

    NOTE(review): machine-mangled — confirm against the upstream
    ``run_tf_text_classification.py`` example:
      * all six parameters are declared with the same name ``UpperCamelCase``
        (SyntaxError); upstream: ``(train_file, eval_file, test_file,
        tokenizer, label_column_id, max_seq_length=None)``;
      * the per-split file lists should be stored under dict keys
        (``files[datasets.Split.TRAIN] = [train_file]`` etc.) — the plain
        rebinding of ``_a`` below loses that structure;
      * later reads of ``ds``, ``files``, ``features_name``, ``label_name``,
        ``labelaid``, ``input_names`` and ``transformed_ds`` cannot resolve
        because every local is bound to ``_a``.
    """
    _a = {}

    if train_file is not None:
        _a = [train_file]

    if eval_file is not None:
        _a = [eval_file]

    if test_file is not None:
        _a = [test_file]

    _a = datasets.load_dataset('''csv''' , data_files=UpperCamelCase )
    _a = list(ds[list(files.keys() )[0]].features.keys() )
    _a = features_name.pop(UpperCamelCase )
    _a = list(set(ds[list(files.keys() )[0]][label_name] ) )
    _a = {label: i for i, label in enumerate(UpperCamelCase )}
    _a = tokenizer.model_input_names
    _a = {}

    if len(UpperCamelCase ) == 1:
        # Single text column: plain sentence classification.
        for k in files.keys():
            _a = ds[k].map(
                lambda UpperCamelCase : tokenizer.batch_encode_plus(
                    example[features_name[0]] , truncation=UpperCamelCase , max_length=UpperCamelCase , padding='''max_length''' ) , batched=UpperCamelCase , )
    elif len(UpperCamelCase ) == 2:
        # Two text columns: sentence-pair classification.
        for k in files.keys():
            _a = ds[k].map(
                lambda UpperCamelCase : tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]) , truncation=UpperCamelCase , max_length=UpperCamelCase , padding='''max_length''' , ) , batched=UpperCamelCase , )

    def gen_train():
        # Generator over (model_inputs, label_id) pairs for the train split.
        for ex in transformed_ds[datasets.Split.TRAIN]:
            _a = {k: v for k, v in ex.items() if k in input_names}
            _a = labelaid[ex[label_name]]
            yield (d, label)

    def gen_val():
        # Generator for the validation split.
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            _a = {k: v for k, v in ex.items() if k in input_names}
            _a = labelaid[ex[label_name]]
            yield (d, label)

    def gen_test():
        # Generator for the test split.
        for ex in transformed_ds[datasets.Split.TEST]:
            _a = {k: v for k, v in ex.items() if k in input_names}
            _a = labelaid[ex[label_name]]
            yield (d, label)

    _a = (
        tf.data.Dataset.from_generator(
            UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        _a = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )

    _a = (
        tf.data.Dataset.from_generator(
            UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        _a = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )

    _a = (
        tf.data.Dataset.from_generator(
            UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        _a = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )

    return train_ds, val_ds, test_ds, labelaid
_snake_case : str = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments describing the input data for training/evaluation.

    NOTE(review): restored from the mangled original, in which this class was
    named ``A`` (clashing with the model-arguments class below, and leaving
    ``DataTrainingArguments`` — referenced by ``HfArgumentParser`` in the main
    routine — undefined), every field was named ``lowercase_``, and defaults
    referenced the undefined name ``_a``.  Field names follow the upstream
    ``run_tf_text_classification.py`` example and the attribute reads in the
    main routine (``label_column_id``, ``train_file``, ``dev_file``,
    ``test_file``, ``max_seq_length``) — verify against upstream.
    """

    # Index of the CSV column holding the label (required, no default).
    label_column_id: int = field(metadata={'help': 'Which column contains the label'})
    train_file: Optional[str] = field(default=None, metadata={'help': 'The path of the training file'})
    dev_file: Optional[str] = field(default=None, metadata={'help': 'The path of the development file'})
    test_file: Optional[str] = field(default=None, metadata={'help': 'The path of the test file'})
    max_seq_length: int = field(
        default=128,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}
    )
@dataclass
class ModelArguments:
    """Arguments describing which model/config/tokenizer to fine-tune from.

    NOTE(review): restored from the mangled original (class named ``A``,
    clashing with the data-arguments class above and leaving
    ``ModelArguments`` — referenced by ``HfArgumentParser`` in the main
    routine — undefined; all fields named ``lowercase_``; defaults referencing
    the undefined ``_a``).  Field names follow the upstream
    ``run_tf_text_classification.py`` example and the attribute reads in the
    main routine (``model_name_or_path``, ``config_name``,
    ``tokenizer_name``, ``cache_dir``) — verify against upstream.
    """

    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}
    )
    use_fast: bool = field(default=False, metadata={'help': 'Set this flag to use fast tokenization.'})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'},
    )
def snake_case_ ():
    """Entry point: parse HF arguments, build TF datasets, fine-tune a
    sequence-classification model, and optionally evaluate it.

    NOTE(review): machine-mangled — confirm against the upstream
    ``run_tf_text_classification.py``:
      * the ``__main__`` guard below calls ``main()``, which is never defined
        (this function should presumably be named ``main``; the name
        ``snake_case_`` also shadows the dataset-builder above);
      * the triple unpack ``_a , _a , _a = ...`` discards two of the three
        parsed dataclasses, so later reads of ``model_args`` / ``data_args``
        / ``training_args`` / ``tokenizer`` / ``trainer`` etc. cannot
        resolve — every local is bound to ``_a``.
    """
    _a = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
    _a , _a , _a = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
            ''' --overwrite_output_dir to overcome.''' )

    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
    logger.info(
        f'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
        f'16-bits training: {training_args.fpaa}' )
    logger.info(f'Training/evaluation parameters {training_args}' )

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    _a = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    _a , _a , _a , _a = get_tfds(
        train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=UpperCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
    _a = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(UpperCamelCase ) , labelaid=UpperCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )

    with training_args.strategy.scope():
        _a = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=UpperCamelCase , cache_dir=model_args.cache_dir , )

    def compute_metrics(UpperCamelCase : EvalPrediction ) -> Dict:
        # Simple accuracy over argmax predictions.
        _a = np.argmax(p.predictions , axis=1 )

        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    _a = TFTrainer(
        model=UpperCamelCase , args=UpperCamelCase , train_dataset=UpperCamelCase , eval_dataset=UpperCamelCase , compute_metrics=UpperCamelCase , )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir )

    # Evaluation
    _a = {}

    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        _a = trainer.evaluate()
        _a = os.path.join(training_args.output_dir , '''eval_results.txt''' )

        with open(UpperCamelCase , '''w''' ) as writer:
            logger.info('''***** Eval results *****''' )

            for key, value in result.items():
                logger.info(f' {key} = {value}' )
                writer.write(f'{key} = {value}\n' )

            results.update(UpperCamelCase )

    return results
# Standard CLI entry point for the fine-tuning script defined above.
# NOTE(review): `main` must match the name of the driver function whose body
# appears above (its `def` line is outside this view) — confirm.
if __name__ == "__main__":
    main()
| 22 | 0 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class _a (TestCase):
    """Tests for ``Dataset.from_list``: creation, schema inference, edge cases.

    Fixes: the base class was the undefined name ``snake_case_`` (now
    ``TestCase``), locals referenced the undefined ``lowercase_``, and every
    method shared the single name ``_snake_case`` so only the last definition
    survived — methods now carry unique ``test_*`` names unittest can discover.
    """

    def _create_example_records(self):
        # Canonical list-of-dicts fixture shared by the tests below.
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        # Same data as `_create_example_records`, in columnar form.
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        # Build the equivalent dataset from columnar data; infos must agree.
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
| 693 |
import os
import string
import sys
# Bit flag OR'ed into arrow-key codes so they can be told apart from plain
# ASCII codes.
ARROW_KEY_FLAG = 1 << 8

# Mapping from a human-readable key name to its internal key code.
# Fix: these values were previously assigned to throwaway placeholder names,
# so KEYMAP / ARROW_KEY_FLAG (referenced by the readers below) were undefined.
KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

# Arrow-key codes form a contiguous range; these bounds are used when decoding
# escape sequences (the readers below index KEYMAP with these keys).
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    # Buffer replaying translated Windows keystrokes on subsequent reads.
    WIN_CH_BUFFER = []
    # Raw two-byte Windows scan codes for the arrow keys (both prefixes).
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

# Digit keys map to their own ASCII codes (previously discarded into a
# throwaway variable on every iteration).
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Read a single raw character (or buffered escape chunk) from stdin.

    Windows (``os.name == "nt"``): reads via msvcrt; two-byte arrow/navigation
    prefixes are translated into the internal KEYMAP escape representation and
    queued in WIN_CH_BUFFER so subsequent calls replay them one char at a time.
    POSIX: temporarily puts the terminal in raw mode and reads one character.

    Fixes: the function was named with the same placeholder as the reader
    below (name collision) and referenced the undefined ``SCREAMING_SNAKE_CASE__``
    instead of its own locals / the module-level WIN_CH_BUFFER.
    """
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            # Always restore the terminal settings, even if the read fails.
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Read one key press and normalise it.

    Returns the raw character for interrupt/newline and printable keys, the
    translated arrow-key character for recognised escape sequences, and
    ``KEYMAP["undefined"]`` otherwise.

    Fixes: duplicate placeholder function name and undefined
    ``SCREAMING_SNAKE_CASE__`` references; now calls ``get_raw_chars`` as the
    original code intended.
    """
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            # Arrow codes are stored with ARROW_KEY_FLAG set; the raw byte on
            # the wire does not carry the flag, so compare against the
            # unflagged range and re-add the flag on success.
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 693 | 1 |
"""simple docstring"""
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of ``2 ** power`` (Project Euler 16).

    Renamed from a placeholder so the ``solution(power)`` call in the script
    below resolves; the manual list/accumulator loop is replaced by the
    idiomatic generator expression.
    """
    return sum(int(digit) for digit in str(2**power))
if __name__ == "__main__":
    # Bug fix: the input was stored in a throwaway name while `power` was
    # referenced below (NameError); same for the `solution(...)` result.
    power = int(input("""Enter the power of 2: """).strip())
    print("""2 ^ """, power, """ = """, 2**power)
    result = solution(power)
    print("""Sum of the digits is: """, result)
| 77 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved PyTorch state dict to fp16 in place (or to *save_path*).

    Fixes: the original signature repeated the same placeholder parameter name
    three times (a SyntaxError), and the function name did not match the
    ``fire.Fire(convert)`` entry point below.

    Args:
        src_path: path to a saved state dict (e.g. ``pytorch_model.bin``).
        map_location: device to load tensors onto while converting.
        save_path: output path; defaults to overwriting ``src_path``.

    Raises:
        TypeError: if the loaded object contains non-tensor values.
    """
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
# CLI entry point: expose the converter via python-fire.
# NOTE(review): `convert` must be the name of the conversion function defined
# above — confirm the two names match.
if __name__ == "__main__":
    fire.Fire(convert)
| 77 | 1 |
"""simple docstring"""
import baseaa
def _lowerCAmelCase ( UpperCAmelCase__ : str ) ->bytes:
return baseaa.baaencode(string.encode("""utf-8""" ) )
def _lowerCAmelCase ( UpperCAmelCase__ : bytes ) ->str:
return baseaa.baadecode(UpperCAmelCase__ ).decode("""utf-8""" )
if __name__ == "__main__":
A_ = '''Hello World!'''
A_ = baseaa_encode(test)
print(encoded)
A_ = baseaa_decode(encoded)
print(decoded)
| 498 |
"""simple docstring"""
def _lowerCAmelCase ( UpperCAmelCase__ : int, UpperCAmelCase__ : int ) ->str:
if a < 0 or b < 0:
raise ValueError("""the value of both inputs must be positive""" )
A__ : str = str(bin(UpperCAmelCase__ ) )[2:] # remove the leading "0b"
A__ : List[Any] = str(bin(UpperCAmelCase__ ) )[2:] # remove the leading "0b"
A__ : List[str] = max(len(UpperCAmelCase__ ), len(UpperCAmelCase__ ) )
return "0b" + "".join(
str(int(char_a == """1""" and char_b == """1""" ) )
for char_a, char_b in zip(a_binary.zfill(UpperCAmelCase__ ), b_binary.zfill(UpperCAmelCase__ ) ) )
# Run the doctests embedded in this module when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 498 | 1 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class snake_case(TestCase):
    """Tests for ``FeaturesManager.determine_framework``.

    Fixes: the base class was the undefined name ``A__`` (now ``TestCase``),
    every method shared the placeholder name ``UpperCamelCase_`` so only the
    last survived, and bodies referenced the undefined ``snake_case_``
    placeholder where concrete values/arguments belong.
    """

    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        # Save a PyTorch checkpoint of the tiny test model into `save_dir`.
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        # Save a TensorFlow checkpoint (converted from the PT weights).
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_lookup(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        # NOTE(review): the corrupted original lost the expected exception
        # type; FileNotFoundError matches determine_framework's behaviour for
        # an empty local directory — confirm against the transformers source.
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TF not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(self.test_model)
| 477 |
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger.
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)

# Bug fix: this dict previously reused the logger's variable name, silently
# shadowing (and losing) the logger object.
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class lowerCAmelCase_(PretrainedConfig):
    """Configuration for the EfficientNet model family (defaults match b7).

    Fixes: the base class was the undefined name ``A__`` (now
    ``PretrainedConfig``), the ``model_type`` attribute carried a placeholder
    name, and the ``__init__`` signature repeated a single placeholder
    parameter name for every argument — a SyntaxError. Mutable list defaults
    are resolved inside the body to avoid shared-list pitfalls; passing
    ``None`` (or nothing) yields the same values as before.
    """

    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes=None,
        in_channels=None,
        out_channels=None,
        depthwise_padding=None,
        strides=None,
        num_block_repeats=None,
        expand_ratios=None,
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2_560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = [3, 3, 5, 3, 5, 5, 3] if kernel_sizes is None else kernel_sizes
        self.in_channels = [32, 16, 24, 40, 80, 112, 192] if in_channels is None else in_channels
        self.out_channels = [16, 24, 40, 80, 112, 192, 320] if out_channels is None else out_channels
        self.depthwise_padding = [] if depthwise_padding is None else depthwise_padding
        self.strides = [1, 2, 2, 2, 1, 2, 1] if strides is None else strides
        self.num_block_repeats = [1, 2, 2, 3, 3, 4, 1] if num_block_repeats is None else num_block_repeats
        self.expand_ratios = [1, 6, 6, 6, 6, 6, 6] if expand_ratios is None else expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        # Each block repeat contributes four layers in this implementation.
        self.num_hidden_layers = sum(self.num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    """ONNX export configuration for EfficientNet checkpoints.

    Fixes: the class reused the configuration class's name (silently replacing
    it), inherited from the undefined name ``A__``, and both properties shared
    one placeholder name so only the last survived.
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Axis layout of the model inputs for ONNX export."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating exported model outputs."""
        return 1e-5
| 465 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    """Build a ``DPTConfig`` plus the expected output shape from the URL.

    Fixes: the function shared its placeholder name with five siblings (only
    the last survived) while the caller references ``get_dpt_config``; every
    config field was assigned to a throwaway variable instead of ``config``;
    the label map referenced the undefined ``idalabel``.
    """
    config = DPTConfig()
    # Default depth-estimation output shape; overridden for ADE checkpoints.
    expected_shape = (1, 384, 384)

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residuals = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]  # semantic-segmentation logits

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    """Drop the timm classification-head weights from *state_dict* in place.

    Fixes: placeholder name collision (the caller references
    ``remove_ignore_keys_``) and ``pop`` being called with the wrong
    placeholder arguments instead of each key (with a ``None`` default so
    missing keys do not raise).
    """
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    """Translate an original DPT state-dict key into the HF naming scheme.

    Fixes: every ``replace`` result was assigned to a throwaway variable, so
    the renames never chained — each rule now rebinds ``name`` so later rules
    see earlier substitutions, as the key structure requires.
    """
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name
def read_in_q_k_v(state_dict, config):
    """Split each fused qkv projection into separate q/k/v entries in place.

    Fixes: the signature repeated one placeholder name for both parameters
    (a SyntaxError) and every q/k/v slice was assigned to a throwaway variable
    instead of being written back into *state_dict*.
    """
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    """Download the standard COCO test image used to sanity-check outputs.

    Fixes: placeholder name collision (the converter calls ``prepare_img``)
    and ``requests.get`` receiving placeholders instead of the URL with
    ``stream=True`` (required so ``.raw`` exposes the response body).
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    """Convert an original DPT checkpoint to the HF format, verify, and save.

    Fixes: helper calls referenced placeholder names; the key-rename loop
    popped with the wrong argument and discarded the renamed key instead of
    re-inserting the value under ``rename_key(key)``.
    """
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys (iterate over a copy since we mutate the dict)
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )
    # Bug fix: the parsed namespace was previously bound to a throwaway
    # variable while `args` was referenced below, raising NameError.
    args = parser.parse_args()
    convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 703 |
"""simple docstring"""
import math
import sys
import cv2 as cva  # fix: `cva` is not a real module; alias OpenCV so existing `cva.*` call sites keep working
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    """Apply a zero-mean Gaussian (given its *variance*) elementwise to *img*.

    Renamed from a placeholder shared by every function in this module; the
    helpers below call ``vec_gaussian`` by this name.
    """
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)
def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    """Return the ``kernel_size x kernel_size`` window of *img* centred at (x, y).

    Renamed from a placeholder so the ``get_slice`` call in the filter resolves.
    """
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    """Create a ``kernel_size x kernel_size`` spatial Gaussian kernel.

    Fixes: placeholder name collision and the distance matrix never being
    filled — each distance was computed into a throwaway variable instead of
    ``arr[i, j]``, so the kernel stayed all zeros.
    """
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(kernel_size):
        for j in range(kernel_size):
            # Euclidean distance of (i, j) from the kernel centre.
            arr[i, j] = math.sqrt(abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2)
    return vec_gaussian(arr, spatial_variance)
def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    """Apply an edge-preserving bilateral filter to a 2-D float image.

    Fixes: placeholder name collisions (``get_gauss_kernel``/``get_slice``/
    ``vec_gaussian`` were undefined) and intermediate results being assigned to
    throwaway variables instead of flowing into the weighted average.
    """
    imga = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    # Skip a half-kernel border so every window lies fully inside the image.
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            # Intensity differences relative to the window centre.
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            imga[i, j] = np.sum(vals) / np.sum(weights)
    return imga
def parse_args(args: list) -> tuple:
    """Parse positional CLI args: filename, spatial var, intensity var, kernel.

    Fixes: placeholder name collision (the script calls ``parse_args``) and the
    parsed values being assigned to throwaway names. An even kernel size is
    bumped to the next odd number so the kernel has a centre pixel.
    """
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cva.imread(filename, 0)  # load as grayscale
    cva.imshow("input image", img)

    # Normalise to [0, 1] floats before filtering.
    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    # Bug fix: `np.uinta` does not exist — the display image must be uint8.
    out = np.uint8(out)
    cva.imshow("output image", out)
    cva.waitKey(0)
    cva.destroyAllWindows()
| 63 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy import structure: tokenizer classes are registered only when their
# optional backends (sentencepiece / tokenizers) are installed.
# Fixes: the structure dict and each class list were bound to one throwaway
# name (so the dict was never populated), the _LazyModule result was dropped
# instead of replacing this module in sys.modules, and a stray table artifact
# was fused onto the last line.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class _lowerCamelCase ( TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
    """Formats Arrow data as (possibly nested) ``torch.Tensor`` structures.

    Fixes: ``__init__`` declared one placeholder name for both parameters
    (a SyntaxError); every method shared the placeholder name ``UpperCAmelCase``
    so ``self._tensorize``/``self._consolidate``/``format_*`` did not exist;
    bodies referenced the undefined ``value`` and ``type(UpperCAmelCase)``
    (which made the None pass-through match *any* type); and the dtype
    constants ``torch.intaa``/``torch.floataa`` do not exist.
    """

    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        # Extra keyword arguments forwarded to every `torch.tensor(...)` call.
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        """Stack a list of same-shape/same-dtype tensors into one tensor."""
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        """Convert one leaf value to a tensor; strings/bytes/None pass through."""
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        # User-supplied kwargs take precedence over the inferred default dtype.
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct):
        """Tensorize an arbitrarily nested Python structure."""
        # NOTE(review): map_list=False matches how scalars/lists are handled
        # elsewhere in this formatter family — confirm against the datasets
        # reference implementation.
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 243 | 0 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class __A (snake_case__):
    """Dataset wrapper that runs ``process(item, **params)`` on every item access.

    BUG FIXES: ``__init__`` previously bound its arguments to throw-away locals
    (so ``self.dataset`` etc. never existed) and repeated one parameter name (a
    SyntaxError); ``__getitem__`` indexed with an undefined name ``i``.
    """

    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class __A (snake_case__):
    """Iterator that runs ``infer`` on every item of ``loader`` and, when the
    dataloader is batched, unrolls each inferred batch into batch_size=1 items.

    BUG FIXES vs. the previous revision: attribute assignments were bound to
    throw-away locals, ``__init__`` repeated a parameter name (a SyntaxError),
    both unbatching methods shared one name (the second shadowed the first even
    though ``loader_batch_item``/``__next__`` are both needed), and several
    ``isinstance`` calls compared a value against *itself* instead of a type
    (``ModelOutput``, ``tuple``, ``list``) — a guaranteed ``TypeError``.
    """

    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size
        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        """Return the item at ``self._loader_batch_index`` of the currently unrolled batch."""
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()
        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class __A (snake_case__):
    """Iterator that flattens the sub-iterators produced by ``infer`` into one stream.

    BUG FIXES: ``__init__`` repeated a parameter name (a SyntaxError), and the
    iterator state was previously bound to throw-away locals instead of
    ``self.iterator`` / ``self.subiterator``.
    """

    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # Start the first sub-iterator lazily on the first `next()` call.
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start lookig at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class __A (snake_case__):
    """Iterator that accumulates items until one carrying ``is_last`` is seen,
    then returns the accumulated list (regrouping chunked pipeline outputs).

    BUG FIXES: results were bound to throw-away locals (``item``/``processed``
    never existed before use) and one ``isinstance`` call compared a value
    against itself instead of against ``list``.
    """

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator
        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator
class __A (snake_case__):
    """Dataset view that returns ``dataset[i][key]`` for each index.

    BUG FIXES: the attributes were previously bound to throw-away locals, and
    ``__getitem__`` indexed with an undefined name ``i``.
    """

    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]
class __A (snake_case__):
    """Dataset view pairing two keys of each item as ``{"text", "text_pair"}``.

    BUG FIXES: ``__init__`` declared the *same* parameter name twice (a
    SyntaxError) and stored both under one attribute, so the pair collapsed to
    a single key; the two keys are now kept separately.
    """

    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 711 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Return a random password of *length* letters, digits and punctuation.

    BUG FIX: every function in this script had been renamed to the same
    identifier ``_a`` (each definition shadowing the previous one) while the
    call sites in ``main`` still use the original names; restored the name
    this function is called by.
    """
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))
def alternative_password_generator(chars_incl: str, i: int) -> str:
    """Build a shuffled password of total length *i* guaranteed to contain *chars_incl*.

    BUG FIX: restored the name ``main`` calls; the body already calls the
    sibling helper ``random`` by its original name.
    """
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)
# random is a generalised function for letters, characters and numbers
# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    """Return *i* characters chosen uniformly at random from *chars_incl*.

    BUG FIX: restored the original name; ``alternative_password_generator``
    calls ``random(...)`` which was otherwise undefined.
    """
    return "".join(secrets.choice(chars_incl) for _ in range(i))
def random_number(chars_incl: str, i: int) -> str:
    """Return *i* random characters drawn from *chars_incl* (intended for digits).

    This was a ``pass`` stub ("Put your code here..."); implemented with the
    cryptographically secure ``secrets.choice``. Name restored from the
    commented-out call in ``alternative_password_generator``.
    """
    return "".join(secrets.choice(chars_incl) for _ in range(i))
def random_letters(chars_incl: str, i: int) -> str:
    """Return *i* random characters drawn from *chars_incl* (intended for letters).

    This was a ``pass`` stub ("Put your code here..."); implemented with the
    cryptographically secure ``secrets.choice``.
    """
    return "".join(secrets.choice(chars_incl) for _ in range(i))
def random_characters(chars_incl: str, i: int) -> str:
    """Return *i* random characters drawn from *chars_incl* (intended for punctuation).

    This was a ``pass`` stub ("Put your code here..."); implemented with the
    cryptographically secure ``secrets.choice``.
    """
    return "".join(secrets.choice(chars_incl) for _ in range(i))
def is_strong_password(password: str, min_length: int = 8) -> bool:
    """Return True when *password* has at least *min_length* characters and
    contains an uppercase letter, a lowercase letter, a digit and a special
    character. (Name restored; the definition had collided with the other
    ``_a`` helpers.)
    """
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def main() -> None:
    """Interactive driver: prompt for length/required chars and print passwords.

    BUG FIX: restored the name ``main`` (the ``__main__`` guard calls it); the
    generator functions are called by their original names.
    """
    max_length = int(input("Please indicate the max length of your password: ").strip())
    required_chars = input(
        "Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(max_length))
    print(
        "Alternative Password generated:", alternative_password_generator(required_chars, max_length), )
    print("[If you are thinking of using this passsword, You better save it.]")


if __name__ == "__main__":
    main()
| 2 | 0 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class LRUCache(Generic[T]):
    """Least-recently-used cache of keys, backed by a deque (left = most recent)
    plus a set for O(1) membership tests.

    BUG FIXES vs. the previous revision:
      * the ``TypeVar`` was bound to the wrong name, leaving ``Generic[T]`` undefined;
      * ``__init__`` bound its state to throw-away locals so ``self.dq_store``
        etc. never existed;
      * on eviction, the *newly referred* key was removed from the reference
        set instead of the evicted one;
      * the class/method names used by the demo (``LRUCache``, ``refer``,
        ``display``) were lost.
    """

    # Default maximum capacity of cache; overridden per instance in __init__.
    _MAX_CAPACITY: int = 10

    def __init__(self, n: int) -> None:
        self.dq_store: deque = deque()   # Cache store of keys
        self.key_reference: set = set()  # References of the keys in cache
        if not n:
            self._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.')
        else:
            self._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Record an access to *x*, evicting the least recently used key if full."""
        if x not in self.key_reference:
            if len(self.dq_store) == self._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                # Drop the *evicted* key from the reference set, not the new one.
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print the cached keys from most to least recently used."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f'LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}'
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase_ = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 669 |
import numpy as np
class Cell:
    """A single grid cell: position, parent link, and A* scores g/h/f.

    BUG FIXES: the attributes were previously bound to throw-away locals (so
    ``self.position`` etc. never existed), and the class name referenced by the
    rest of the file (``Cell``) was lost.
    """

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        # Two cells are equal when they occupy the same grid coordinate.
        return self.position == cell.position

    def showcell(self):
        print(self.position)
class Gridworld:
    """A rectangular grid world with 8-connected neighbourhoods.

    BUG FIXES: the attributes were previously bound to throw-away locals, and
    the class/method names referenced by the demo (``Gridworld``,
    ``get_neigbours``) were lost.
    """

    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)          # grid of cells (0 = free)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """Return the in-bounds 8-neighbourhood of *cell* as new Cell objects."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    """Run (a simplified) A* search from *start* to *goal* on *world* and
    return the path as a list of positions, start first.

    BUG FIXES: the heuristic previously computed ``(y - y)**2 + (x - x)**2``
    (always 0, degrading the search to uniform cost on g alone), the tuple
    unpackings/score assignments were bound to throw-away locals, the function
    name used by the demo (``astar``) was lost, and the parameter list repeated
    one name (a SyntaxError).

    NOTE(review): the inner loops over ``_closed`` / ``_open`` are no-ops in
    the upstream algorithm (``continue`` only skips the inner iteration);
    preserved as-is to keep behaviour identical.
    """
    _open = []
    _closed = []
    _open.append(start)
    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
if __name__ == "__main__":
lowercase_ = Gridworld()
# Start position and goal
lowercase_ = Cell()
lowercase_ = (0, 0)
lowercase_ = Cell()
lowercase_ = (4, 4)
print(F"""path from {start.position} to {goal.position}""")
lowercase_ = astar(world, start, goal)
# Just for visual reasons.
for i in s:
lowercase_ = 1
print(world.w)
| 669 | 1 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    """Distribution transformed by ``loc + scale * x``.

    BUG FIXES: ``loc``/``scale`` were bound to throw-away locals (so
    ``self.loc``/``self.scale`` never existed), the three properties all shared
    one name (shadowing each other), and the class name referenced by
    ``DistributionOutput.distribution`` plus the base class
    (``TransformedDistribution``, imported above) were lost.
    """

    def __init__(self, base_distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the transformed distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation of the transformed distribution."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    """Project hidden features to one linear head per distribution argument and
    map them into the distribution's domain.

    BUG FIXES: the attributes were bound to throw-away locals, ``__init__``
    repeated parameter names (a SyntaxError: ``nn.Linear`` received the same
    argument twice instead of ``(in_features, dim)``), the forward method was
    not named ``forward`` (so ``nn.Module.__call__`` would fail), and the
    class name referenced by ``DistributionOutput.get_parameter_projection``
    was lost.
    """

    def __init__(self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    """Wrap an arbitrary callable as an ``nn.Module``.

    BUG FIXES: the callable was bound to a throw-away local, the forward method
    was not named ``forward``, and the class name referenced by
    ``DistributionOutput.get_parameter_projection`` was lost.
    """

    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    """Base class describing how model outputs parameterize a distribution.

    BUG FIXES: the class attributes all shared one obfuscated name, the
    methods/properties likewise shadowed each other (e.g. ``distribution``
    calls ``self._base_distribution``, which must exist under that name),
    and ``self.dim``/``self.args_dim`` were never assigned in ``__init__``.
    """

    distribution_class: type
    # NOTE(review): the original declared a second int-typed class annotation
    # here; restored as `dim` (set in __init__) — confirm against upstream.
    dim: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        # Scale each argument's dimensionality by the event dimension.
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc=None, scale=None) -> Distribution:
        """Build the (optionally affine-transformed) distribution."""
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of each individual event contemplated by the distribution."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions, i.e. length of ``event_shape``."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value in the distribution's support, used to pad invalid entries."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return the projection layer mapping features to distribution args."""
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map), )

    def domain_map(self, *args):
        """Map raw projections into the distribution's valid domain (abstract)."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x) -> torch.Tensor:
        # Smooth positivity transform: (x + sqrt(x^2 + 4)) / 2.
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    """DistributionOutput head parameterizing a Student-T distribution.

    BUG FIXES: the class attributes shared one name, the classmethod's
    parameter list repeated a single name (a SyntaxError), and the base class
    (``DistributionOutput``) was lost.
    """

    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df, loc, scale):
        # scale must be strictly positive; df must exceed 2 for finite variance.
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    """DistributionOutput head parameterizing a Normal distribution.

    BUG FIXES: the class attributes shared one name, the classmethod's
    parameter list repeated a single name (a SyntaxError), and the base class
    (``DistributionOutput``) was lost.
    """

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc, scale):
        # scale must be strictly positive.
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    """DistributionOutput head parameterizing a Negative Binomial distribution.

    BUG FIXES: the class attributes shared one name, the method names were
    lost (``_base_distribution`` / ``distribution`` overrides), the
    classmethod's parameter list repeated a single name (a SyntaxError), and
    the base class (``DistributionOutput``) was lost.
    """

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count, logits):
        # total_count must be positive.
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc=None, scale=None) -> Distribution:
        """Scale via the parameters (counts must stay integral)."""
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
| 562 |
"""simple docstring"""
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()

# BUG FIX: both module constants were previously assigned to the same
# throw-away name, while the code below references `logger` and
# `TOKENIZER_CLASSES`.
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Convert slow tokenizer checkpoints into fast (tokenizers-backed) ones and
    save only the resulting ``tokenizer.json`` files under *dump_path*.

    BUG FIXES: the function name used by the CLI below was lost, the parameter
    list repeated one name (a SyntaxError), and every intermediate value was
    bound to the same throw-away local while later lines referenced the
    original variable names.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""")
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}
    logger.info(f"""Loading tokenizer classes: {tokenizer_names}""")
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            # Convert every canonical checkpoint known for this tokenizer.
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(f"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""")
        for checkpoint in checkpoint_names:
            logger.info(f"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""")
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
            # Save fast tokenizer
            logger.info(f"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""")
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""")
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""")
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name)
            logger.info(f"""=> File names {file_names}""")
            for file_name in file_names:
                # Keep only the fast `tokenizer.json`; delete every other artifact.
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"""=> removing {file_name}""")
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
f'Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
UpperCamelCase = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 562 | 1 |
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Return which day of the week a ``mm-dd-yyyy`` / ``mm/dd/yyyy`` date falls
    on, computed via Zeller's congruence and cross-checked with ``datetime``.

    BUG FIXES: the function name used by the CLI below was lost, and every
    intermediate value was bound to the same throw-away local while later
    lines referenced ``days``, ``dt_ck`` and the congruence variables.

    Raises ValueError for malformed input and AssertionError if the congruence
    disagrees with ``datetime`` (should never happen).
    """
    days = {
        '0': 'Sunday',
        '1': 'Monday',
        '2': 'Tuesday',
        '3': 'Wednesday',
        '4': 'Thursday',
        '5': 'Friday',
        '6': 'Saturday',
    }
    # Map datetime.weekday() (Mon=0..Sun=6) onto Zeller's output (Sun=0..Sat=6).
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError('Must be 10 characters long')
    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError('Month must be between 1 - 12')
    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'')
    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError('Date must be between 1 - 31')
    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'')
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            'Year out of range. There has to be some sort of limit...right?')
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError('The date was evaluated incorrectly. Contact developer.')
    # Response
    response = f'Your date {date_input}, is a {days[str(f)]}!'
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser(
description=(
"""Find out what day of the week nearly any date is or was. Enter """
"""date as a string in the mm-dd-yyyy or mm/dd/yyyy format"""
)
)
parser.add_argument(
"""date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)"""
)
SCREAMING_SNAKE_CASE__ : Dict = parser.parse_args()
zeller(args.date_input)
| 0 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
# BUG FIX: the module logger was assigned to a throw-away name while the class
# below references `logger`.
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    """Lightning module fine-tuning a transformer on a GLUE sequence-classification task.

    BUG FIXES: the class name used by ``main`` below was lost, every hook was
    renamed to the same identifier (only the last definition survived, and
    Lightning dispatches on hook *names*), and intermediate values were bound
    to one throw-away local while later lines referenced the original names.
    """

    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs['token_type_ids'] = (
                batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
            )
        outputs = self(**inputs)
        loss = outputs[0]
        lr_scheduler = self.trainer.lr_schedulers[0]['scheduler']
        tensorboard_logs = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        """Cache GLUE features for the train and dev splits."""
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()
        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info('Loading features from cached file %s', cached_features_file)
            else:
                logger.info('Creating features from dataset file at %s', args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == 'dev'
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples, self.tokenizer, max_length=args.max_seq_length, label_list=self.labels, output_mode=args.glue_output_mode, )
                logger.info('Saving features into cached file %s', cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode, batch_size, shuffle=False):
        """Load cached features for *mode* and wrap them in a DataLoader."""
        mode = 'dev' if mode == 'test' else mode
        cached_features_file = self._feature_file(mode)
        logger.info('Loading features from cached file %s', cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels), batch_size=batch_size, shuffle=shuffle, )

    def validation_step(self, batch, batch_idx):
        inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs['token_type_ids'] = (
                batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
            )
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs['labels'].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        """Aggregate step outputs into metric dict + per-example lists."""
        val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x['pred'] for x in outputs], axis=0)
        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)
        out_label_ids = np.concatenate([x['target'] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        results = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        ret, preds, targets = self._eval_end(outputs)
        logs = ret['log']
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret['log']
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            '--max_seq_length', default=128, type=int, help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ), )
        parser.add_argument(
            '--task', default='', type=str, required=True, help='The GLUE task to run', )
        parser.add_argument(
            '--gpus', default=0, type=int, help='The number of GPUs allocated for this, it is by default 0 meaning none', )
        parser.add_argument(
            '--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
        return parser
def __lowerCamelCase ( ):
    """CLI entry point: parse args, train a GLUE transformer, optionally run test on the best checkpoint.

    NOTE(review): `args`, `model`, `trainer` and `checkpoints` bindings were
    collapsed into the reused `A__` target by obfuscation — the references
    below assume those original names.
    """
    A__ = argparse.ArgumentParser()
    add_generic_args(lowerCAmelCase__ ,os.getcwd() )
    A__ = GLUETransformer.add_model_specific_args(lowerCAmelCase__ ,os.getcwd() )
    A__ = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        A__ = os.path.join(
            './results' ,f'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' ,)
        os.makedirs(args.output_dir )
    A__ = GLUETransformer(lowerCAmelCase__ )
    A__ = generic_train(lowerCAmelCase__ ,lowerCAmelCase__ )
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        # Pick the newest epoch checkpoint produced by lightning.
        A__ = sorted(glob.glob(os.path.join(args.output_dir ,'checkpoint-epoch=*.ckpt' ) ,recursive=lowerCAmelCase__ ) )
        A__ = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(lowerCAmelCase__ )
# NOTE(review): `main` is not defined in this chunk — the entry point above
# was renamed `__lowerCamelCase` by obfuscation; presumably it was `main`.
if __name__ == "__main__":
    main()
| 260 | 0 |
'''simple docstring'''
from math import pi, sqrt, tan
def __lowerCAmelCase ( side_length ):
    """Return the total surface area (6*a^2) of a cube with edge ``side_length``.

    Raises:
        ValueError: if ``side_length`` is negative.
    """
    # Fix: the parameter had been renamed to `snake_case__` while the body
    # still referenced `side_length`, making every call raise NameError.
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values" )
    return 6 * side_length**2
def __lowerCAmelCase ( length , breadth , height ):
    """Return the surface area 2(lb + bh + lh) of a cuboid.

    Raises:
        ValueError: if any dimension is negative.
    """
    # Fix: duplicated `snake_case__` parameters (SyntaxError); body names restored.
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values" )
    return 2 * ((length * breadth) + (breadth * height) + (length * height))
def __lowerCAmelCase ( radius ):
    """Return the surface area 4*pi*r^2 of a sphere.

    Raises:
        ValueError: if ``radius`` is negative.
    """
    # Fix: parameter renamed back to `radius` to match the body (was NameError).
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values" )
    return 4 * pi * radius**2
def __lowerCAmelCase ( radius ):
    """Return the total surface area 3*pi*r^2 of a solid hemisphere (dome + flat base).

    Raises:
        ValueError: if ``radius`` is negative.
    """
    # Fix: parameter renamed back to `radius` to match the body (was NameError).
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values" )
    return 3 * pi * radius**2
def __lowerCAmelCase ( radius , height ):
    """Return the total surface area pi*r*(r + slant) of a right cone.

    The slant height is sqrt(h^2 + r^2).

    Raises:
        ValueError: if ``radius`` or ``height`` is negative.
    """
    # Fix: duplicated `snake_case__` parameters (SyntaxError); body names restored.
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values" )
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def __lowerCAmelCase ( radius_1 , radius_2 , height ):
    """Return the total surface area of a conical frustum.

    pi * (slant*(r1 + r2) + r1^2 + r2^2) with slant = sqrt(h^2 + (r1 - r2)^2).

    Raises:
        ValueError: if any argument is negative.
    """
    # Fix: the obfuscation collapsed `radius_1`/`radius_2` into a single
    # `radius_a`, so the two radii were indistinguishable and the slant term
    # (radius_1 - radius_2) degenerated to zero; names restored.
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values" )
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def __lowerCAmelCase ( radius , height ):
    """Return the total surface area 2*pi*r*(h + r) of a closed cylinder.

    Raises:
        ValueError: if ``radius`` or ``height`` is negative.
    """
    # Fix: duplicated `snake_case__` parameters (SyntaxError); body names restored.
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values" )
    return 2 * pi * radius * (height + radius)
def __lowerCAmelCase ( torus_radius , tube_radius ):
    """Return the surface area 4*pi^2*R*r of a ring torus.

    ``torus_radius`` (R) is the distance from the torus center to the tube
    center; ``tube_radius`` (r) is the tube's own radius.

    Raises:
        ValueError: if either radius is negative, or if R < r (spindle /
            self-intersecting torus, not supported).
    """
    # Fix: duplicated parameters (SyntaxError) and `pow(snake_case__, 2)` —
    # the obfuscated token was `pi`, restored here.
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values" )
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori" )
    return 4 * pow(pi , 2 ) * torus_radius * tube_radius
def __lowerCAmelCase ( length , width ):
    """Return the area length*width of a rectangle.

    Raises:
        ValueError: if either side is negative.
    """
    # Fix: duplicated `snake_case__` parameters (SyntaxError); body names restored.
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values" )
    return length * width
def __lowerCAmelCase ( side_length ):
    """Return the area a^2 of a square.

    Raises:
        ValueError: if ``side_length`` is negative.
    """
    # Fix: parameter renamed back to `side_length` to match the body (was NameError).
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values" )
    return side_length**2
def __lowerCAmelCase ( base , height ):
    """Return the area base*height/2 of a triangle.

    Raises:
        ValueError: if ``base`` or ``height`` is negative.
    """
    # Fix: duplicated `snake_case__` parameters (SyntaxError); body names restored.
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values" )
    return (base * height) / 2
def __lowerCAmelCase ( side_1 , side_2 , side_3 ):
    """Return the area of a triangle from its three side lengths (Heron's formula).

    Raises:
        ValueError: if any side is negative or the sides violate the
            triangle inequality.
    """
    # Fix: the obfuscation collapsed `side_1`/`side_2`/`side_3` into a single
    # `sidea`, so both the validity checks and Heron's formula operated on
    # one side only; distinct names restored.
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values" )
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError("Given three sides do not form a triangle" )
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3) )
    return area
def __lowerCAmelCase ( base , height ):
    """Return the area base*height of a parallelogram.

    Raises:
        ValueError: if ``base`` or ``height`` is negative.
    """
    # Fix: duplicated `snake_case__` parameters (SyntaxError); body names restored.
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values" )
    return base * height
def __lowerCAmelCase ( base_1 , base_2 , height ):
    """Return the area (b1 + b2)*h/2 of a trapezium.

    Raises:
        ValueError: if any argument is negative.
    """
    # Fix: the obfuscation collapsed `base_1`/`base_2` into a single `basea`;
    # distinct names restored.
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values" )
    return 1 / 2 * (base_1 + base_2) * height
def __lowerCAmelCase ( radius ):
    """Return the area pi*r^2 of a circle.

    Raises:
        ValueError: if ``radius`` is negative.
    """
    # Fix: parameter renamed back to `radius` to match the body (was NameError).
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values" )
    return pi * radius**2
def __lowerCAmelCase ( radius_x , radius_y ):
    """Return the area pi*a*b of an ellipse with semi-axes ``radius_x``/``radius_y``.

    Raises:
        ValueError: if either semi-axis is negative.
    """
    # Fix: duplicated `snake_case__` parameters (SyntaxError); body names restored.
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values" )
    return pi * radius_x * radius_y
def __lowerCAmelCase ( diagonal_1 , diagonal_2 ):
    """Return the area d1*d2/2 of a rhombus given its diagonals.

    Raises:
        ValueError: if either diagonal is negative.
    """
    # Fix: the obfuscation collapsed `diagonal_1`/`diagonal_2` into a single
    # `diagonal_a`; distinct names restored.
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values" )
    return 1 / 2 * diagonal_1 * diagonal_2
def __lowerCAmelCase ( sides , length ):
    """Return the area of a regular polygon: (n * a^2) / (4 * tan(pi / n)).

    Args:
        sides: integer number of sides, must be >= 3.
        length: non-negative side length.

    Raises:
        ValueError: if ``sides`` is not an int >= 3, or ``length`` is negative.
    """
    # Fixes: duplicated `snake_case__` parameters (SyntaxError; the isinstance
    # check was `isinstance(sides, int)`), and a duplicated, unreachable
    # `return` line has been removed.
    if not isinstance(sides , int ) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides" )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as \
length of a side" )
    return (sides * length**2) / (4 * tan(pi / sides ))
# Demo/self-test driver. NOTE(review): it calls `area_rectangle`,
# `surface_area_cube`, etc., but every definition above was renamed
# `__lowerCAmelCase` by obfuscation, so these names are unresolved in this
# chunk as written. Also "Reqular Pentagon" is a typo for "Regular"
# (runtime string — not changed here).
if __name__ == "__main__":
    import doctest
    doctest.testmod(verbose=True)  # verbose so we can see methods missing tests
    print('''[DEMO] Areas of various geometric shapes: \n''')
    print(f'Rectangle: {area_rectangle(10, 20) = }')
    print(f'Square: {area_square(10) = }')
    print(f'Triangle: {area_triangle(10, 10) = }')
    print(f'Triangle: {area_triangle_three_sides(5, 12, 13) = }')
    print(f'Parallelogram: {area_parallelogram(10, 20) = }')
    print(f'Rhombus: {area_rhombus(10, 20) = }')
    print(f'Trapezium: {area_trapezium(10, 20, 30) = }')
    print(f'Circle: {area_circle(20) = }')
    print(f'Ellipse: {area_ellipse(10, 20) = }')
    print('''\nSurface Areas of various geometric shapes: \n''')
    print(f'Cube: {surface_area_cube(20) = }')
    print(f'Cuboid: {surface_area_cuboid(10, 20, 30) = }')
    print(f'Sphere: {surface_area_sphere(20) = }')
    print(f'Hemisphere: {surface_area_hemisphere(20) = }')
    print(f'Cone: {surface_area_cone(10, 20) = }')
    print(f'Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }')
    print(f'Cylinder: {surface_area_cylinder(10, 20) = }')
    print(f'Torus: {surface_area_torus(20, 10) = }')
    print(f'Equilateral Triangle: {area_reg_polygon(3, 10) = }')
    print(f'Square: {area_reg_polygon(4, 10) = }')
    print(f'Reqular Pentagon: {area_reg_polygon(5, 10) = }')
| 721 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def __lowerCAmelCase ( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    """Tokenize a single text line with the given padding side.

    Returns the tokenizer's BatchEncoding for ``[line]``, padded to
    ``max_length`` when ``pad_to_max_length`` is set.

    NOTE(review): parameter names reconstructed from the body — the
    obfuscation had declared `snake_case__` six times (a SyntaxError) and
    dropped the `tokenizer.padding_side = ...` assignment target.
    """
    # BART tokenizers need add_prefix_space for lines that don't start with a space.
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(" " ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding="max_length" if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def __lowerCAmelCase ( input_ids , pad_token_id , attention_mask=None , ):
    """Drop columns that are entirely padding from a batch of token ids.

    Args:
        input_ids: 2-D tensor (batch, seq_len).
        pad_token_id: id treated as padding.
        attention_mask: optional mask trimmed with the same columns.

    Returns:
        The trimmed ``input_ids`` (or an ``(input_ids, attention_mask)``
        tuple when a mask is given).
    """
    # Fix: duplicated `snake_case__` parameters (SyntaxError); the body's
    # original names (`input_ids`, `pad_token_id`, `attention_mask`) restored.
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class A ( SCREAMING_SNAKE_CASE__ ):
    """Line-by-line seq2seq dataset reading ``<type_path>.source`` / ``<type_path>.target``.

    NOTE(review): ``__init__`` declares ``_UpperCAmelCase`` repeatedly
    (a SyntaxError) — presumably the parameters were (tokenizer, data_dir,
    max_source_length, max_target_length, type_path, n_obs, src_lang,
    tgt_lang, prefix); the body references those names. Likewise the
    reused ``__UpperCamelCase`` target hides the real attribute names.
    """

    def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="train" , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="" , ) -> List[str]:
        super().__init__()
        __UpperCamelCase : List[str] = Path(_UpperCAmelCase ).joinpath(type_path + ".source" )
        __UpperCamelCase : Dict = Path(_UpperCAmelCase ).joinpath(type_path + ".target" )
        __UpperCamelCase : int = self.get_char_lens(self.src_file )
        __UpperCamelCase : Optional[int] = max_source_length
        __UpperCamelCase : Optional[int] = max_target_length
        assert min(self.src_lens ) > 0, f"found empty line in {self.src_file}"
        __UpperCamelCase : str = tokenizer
        __UpperCamelCase : Optional[Any] = prefix
        if n_obs is not None:
            # Optionally truncate the dataset to the first n_obs examples.
            __UpperCamelCase : Tuple = self.src_lens[:n_obs]
        __UpperCamelCase : Optional[Any] = src_lang
        __UpperCamelCase : Any = tgt_lang

    def __len__(self ) -> List[Any]:
        """Number of (source, target) line pairs."""
        return len(self.src_lens )

    def __getitem__(self , _UpperCAmelCase ) -> Dict[str, torch.Tensor]:
        """Tokenize one source/target pair (linecache is 1-indexed)."""
        __UpperCamelCase : str = index + 1  # linecache starts at 1
        __UpperCamelCase : int = self.prefix + linecache.getline(str(self.src_file ) , _UpperCAmelCase ).rstrip("\n" )
        __UpperCamelCase : Optional[Any] = linecache.getline(str(self.tgt_file ) , _UpperCAmelCase ).rstrip("\n" )
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , _UpperCAmelCase ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        # RAG tokenizers wrap a question encoder + generator pair.
        __UpperCamelCase : int = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , _UpperCAmelCase ) else self.tokenizer
        )
        __UpperCamelCase : str = self.tokenizer.generator if isinstance(self.tokenizer , _UpperCAmelCase ) else self.tokenizer
        __UpperCamelCase : str = encode_line(_UpperCAmelCase , _UpperCAmelCase , self.max_source_length , "right" )
        __UpperCamelCase : List[Any] = encode_line(_UpperCAmelCase , _UpperCAmelCase , self.max_target_length , "right" )
        __UpperCamelCase : str = source_inputs["input_ids"].squeeze()
        __UpperCamelCase : List[Any] = target_inputs["input_ids"].squeeze()
        __UpperCamelCase : int = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def a_ (_UpperCAmelCase ) -> Optional[int]:
        """Return the character length of every line in the given file."""
        return [len(_UpperCAmelCase ) for x in Path(_UpperCAmelCase ).open().readlines()]

    def a_ (self , _UpperCAmelCase ) -> Dict[str, torch.Tensor]:
        """Collate a list of examples into padded batch tensors, trimming all-pad columns."""
        __UpperCamelCase : str = torch.stack([x["input_ids"] for x in batch] )
        __UpperCamelCase : Union[str, Any] = torch.stack([x["attention_mask"] for x in batch] )
        __UpperCamelCase : Any = torch.stack([x["decoder_input_ids"] for x in batch] )
        __UpperCamelCase : List[str] = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , _UpperCAmelCase )
            else self.tokenizer.pad_token_id
        )
        __UpperCamelCase : Optional[Any] = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , _UpperCAmelCase )
            else self.tokenizer.pad_token_id
        )
        __UpperCamelCase : int = trim_batch(_UpperCAmelCase , _UpperCAmelCase )
        __UpperCamelCase , __UpperCamelCase : Dict = trim_batch(_UpperCAmelCase , _UpperCAmelCase , attention_mask=_UpperCAmelCase )
        __UpperCamelCase : Union[str, Any] = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
_lowerCAmelCase = getLogger(__name__)
def __lowerCAmelCase ( snake_case__ ):
    """Flatten one level of nesting: a list of iterables becomes a single list."""
    flat = []
    for sub in snake_case__:
        flat.extend(sub)
    return flat
def __lowerCAmelCase ( snake_case__ ):
__UpperCamelCase : List[str] = get_git_info()
save_json(snake_case__ , os.path.join(snake_case__ , "git_log.json" ) )
def __lowerCAmelCase ( content , path , indent=4 , **json_dump_kwargs ):
    """Serialize ``content`` as JSON to ``path``.

    Args:
        content: JSON-serializable object.
        path: destination file path.
        indent: indentation passed to ``json.dump`` (default 4).
        **json_dump_kwargs: forwarded to ``json.dump``.
    """
    # Fix: duplicated `snake_case__` parameters (SyntaxError), and the dump
    # target had been mangled — json.dump must receive the open file object.
    with open(path , "w" ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def __lowerCAmelCase ( path ):
    """Load and return the JSON content of the file at ``path``.

    Raises:
        OSError: if the file cannot be opened.
        json.JSONDecodeError: if the file is not valid JSON.
    """
    # Fix: the original passed the path string to json.load instead of the
    # open file handle, which raises at runtime.
    with open(path ) as f:
        return json.load(f )
def __lowerCAmelCase ( ):
    """Return metadata (id, sha, branch, hostname) about the enclosing git repo.

    NOTE(review): the obfuscation replaced the ``True`` argument and the
    ``repo`` binding with placeholders; reconstructed here as
    ``git.Repo(search_parent_directories=True)`` per the original RAG utils.
    """
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        "repo_id": str(repo ),
        "repo_sha": str(repo.head.object.hexsha ),
        "repo_branch": str(repo.active_branch ),
        "hostname": str(socket.gethostname() ),
    }
    return repo_infos
def __lowerCAmelCase ( f , x ):
    """Eager map: apply ``f`` to every element of ``x`` and return a list."""
    # Fix: duplicated `snake_case__` parameters (SyntaxError).
    return list(map(f , x ) )
def __lowerCAmelCase ( obj , path ):
    """Pickle ``obj`` to the file at ``path``.

    NOTE: pickle data is only safe to load from trusted sources.
    """
    # Fix: duplicated `snake_case__` parameters (SyntaxError), and the dump
    # target had been mangled — pickle.dump must receive the open file object.
    with open(path , "wb" ) as f:
        return pickle.dump(obj , f )
def __lowerCAmelCase ( s ):
    """SQuAD-style answer normalization: lowercase, strip punctuation,
    drop English articles (a/an/the), and collapse whitespace."""
    # Fix: the inner helpers' parameter names had been mangled so their
    # bodies referenced an undefined `text`; restored.

    def remove_articles(text ):
        return re.sub(r"\b(a|an|the)\b" , " " , text )

    def white_space_fix(text ):
        return " ".join(text.split() )

    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(text ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def __lowerCAmelCase ( prediction , ground_truth ):
    """Token-level F1 between the normalized prediction and ground truth.

    Returns 0 when the answers share no tokens; otherwise the harmonic
    mean of token precision and recall.
    """
    # Fix: duplicated `snake_case__` parameters (SyntaxError); names restored.
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def __lowerCAmelCase ( prediction , ground_truth ):
    """Return True when the two answers are equal after normalization."""
    # Fix: duplicated `snake_case__` parameters (SyntaxError); names restored.
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def __lowerCAmelCase ( output_lns , reference_lns ):
    """Return ``{"em": fraction}`` of exact matches between parallel answer lists.

    Both lists must have the same length; an empty pair yields ``{"em": 0}``.
    """
    # Fix: duplicated `snake_case__` parameters (SyntaxError); names restored.
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def __lowerCAmelCase ( model_prefix ):
    """Return True if the model type/prefix string denotes a RAG model."""
    # Fix: parameter renamed back to `model_prefix` to match the body (was NameError).
    return model_prefix.startswith("rag" )
def __lowerCAmelCase ( extra_params , hparams , config ):
    """Move ``extra_params`` values from ``hparams`` onto ``config``.

    For each param set on ``hparams``, write it onto ``config`` under the
    same name — or an equivalent one (T5 uses ``dropout_rate`` instead of
    ``dropout``) — then delete it from ``hparams``. Params the config does
    not support are logged and dropped.

    Returns:
        The mutated ``(hparams, config)`` pair.
    """
    # Reconstructed from the original RAG finetuning utils: the obfuscation
    # declared three identical parameters (SyntaxError) and collapsed the
    # `equivalent_param` bindings.
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info("config doesn't have a `{}` attribute".format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
| 399 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import scaffolding for the CANINE model: submodules are only imported
# when their attributes are first accessed (or eagerly for type checkers).
__lowerCAmelCase : List[str] = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

# The modeling entries require torch; they are skipped gracefully when absent.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : Dict = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]

if TYPE_CHECKING:
    # Eager imports so static analyzers see the real symbols.
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    # NOTE(review): obfuscation reuses `__lowerCAmelCase` for both the import
    # structure and this list — presumably these were `_import_structure`
    # and the modeling list in the original file.
    __lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 262 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( graph , next_ver , curr_ind , path ) -> bool:
    """Return True if ``next_ver`` can extend the partial Hamiltonian path.

    Args:
        graph: adjacency matrix (0/1 entries).
        next_ver: candidate vertex to append.
        curr_ind: index in ``path`` where the candidate would be placed.
        path: partial path (unfilled slots hold -1).
    """
    # Fix: duplicated `__A` parameters (SyntaxError); names restored to the
    # ones the body references.
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path )
def SCREAMING_SNAKE_CASE__ ( graph , path , curr_ind ) -> bool:
    """Backtracking helper: try to complete a Hamiltonian cycle in ``path``.

    ``path`` is mutated in place; slot ``curr_ind`` is the next position to
    fill. Returns True when a cycle back to ``path[0]`` is completed.
    """
    # Fixes: duplicated `__A` parameters (SyntaxError), and the recursive
    # call / helper call referenced names that no longer exist after
    # obfuscation — the validity check is inlined and the recursion now
    # calls this function by its own name.
    # Base Case: all vertices placed — need an edge closing the cycle.
    if curr_ind == len(graph ):
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step: try every vertex as the next hop.
    for next_ver in range(0 , len(graph ) ):
        # Valid when an edge exists from the last vertex and next_ver is unused.
        if graph[path[curr_ind - 1]][next_ver] != 0 and all(
            vertex != next_ver for vertex in path
        ):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if SCREAMING_SNAKE_CASE__(graph , path , curr_ind + 1 ):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False
def SCREAMING_SNAKE_CASE__ ( __A , __A = 0 ) -> list[int]:
    """Search for a Hamiltonian cycle starting from ``start_index``; return it or [].

    NOTE(review): obfuscation broke this block — `__A` is declared twice
    (SyntaxError), `_snake_case` is reused for `path`/start-slot bindings,
    and `util_hamilton_cycle` refers to the previous (renamed) helper.
    Presumably the parameters were `graph` and `start_index`.
    """
    _snake_case = [-1] * (len(__A ) + 1)
    # initialize start and end of path with starting index
    _snake_case = _snake_case = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(__A , __A , 1 ) else []
| 495 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_:Tuple = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
    """ConvNeXT-style image processor: shortest-edge resize (with crop_pct
    center-crop below 384), rescale and normalize.

    NOTE(review): several method signatures declare ``lowerCamelCase__``
    repeatedly — a SyntaxError introduced by obfuscation; the real
    parameter names match the keywords used at the call sites below.
    """

    # Model input produced by preprocess().
    __lowerCamelCase : Union[str, Any] = ["pixel_values"]

    def __init__( self, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = PILImageResampling.BILINEAR, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ):
        super().__init__(**lowerCamelCase__ )
        A : str = size if size is not None else {"""shortest_edge""": 384}
        A : List[Any] = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
        A : str = do_resize
        A : Union[str, Any] = size
        # Default value set here for backwards compatibility where the value in config is None
        A : Optional[Any] = crop_pct if crop_pct is not None else 224 / 256
        A : Any = resample
        A : List[Any] = do_rescale
        A : Union[str, Any] = rescale_factor
        A : Tuple = do_normalize
        A : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        A : int = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ):
        """Resize: below 384 px, resize shortest edge to edge/crop_pct then center-crop;
        at 384+ warp directly to (edge, edge)."""
        A : Tuple = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
        if "shortest_edge" not in size:
            raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
        A : int = size["""shortest_edge"""]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            A : Dict = int(shortest_edge / crop_pct )
            A : Optional[int] = get_resize_output_image_size(lowerCamelCase__, size=lowerCamelCase__, default_to_square=lowerCamelCase__ )
            A : Optional[Any] = resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=lowerCamelCase__, size=(shortest_edge, shortest_edge), data_format=lowerCamelCase__, **lowerCamelCase__ )
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                lowerCamelCase__, size=(shortest_edge, shortest_edge), resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )

    def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
        """Scale pixel values by the given factor (e.g. 1/255)."""
        return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )

    def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
        """Normalize per channel with the given mean and std."""
        return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )

    def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ):
        """Full preprocessing pipeline; returns a BatchFeature of pixel_values."""
        A : List[str] = do_resize if do_resize is not None else self.do_resize
        A : str = crop_pct if crop_pct is not None else self.crop_pct
        A : List[str] = resample if resample is not None else self.resample
        A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
        A : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
        A : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
        A : Optional[int] = image_mean if image_mean is not None else self.image_mean
        A : Optional[int] = image_std if image_std is not None else self.image_std
        A : Dict = size if size is not None else self.size
        A : Dict = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
        A : Tuple = make_list_of_images(lowerCamelCase__ )
        if not valid_images(lowerCamelCase__ ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )

        # NOTE(review): `and` binds tighter than `or`, so this reads as
        # `(do_resize and size is None) or resample is None` — an explicit
        # `resample=None` raises even when do_resize is False. Confirm intent.
        if do_resize and size is None or resample is None:
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("""crop_pct must be specified if size < 384.""" )

        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )

        # All transformations expect numpy arrays.
        A : Dict = [to_numpy_array(lowerCamelCase__ ) for image in images]

        if do_resize:
            A : Any = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, crop_pct=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images]

        if do_rescale:
            A : Dict = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images]

        if do_normalize:
            A : List[str] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images]

        A : Any = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images]

        A : Tuple = {"""pixel_values""": images}
        return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ )
| 520 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
@staticmethod
def _lowerCAmelCase ( *lowerCamelCase__, **lowerCamelCase__ ):
pass
def __UpperCamelCase ( image ) -> str:
    """Return the first 10 hex chars of the MD5 digest of the image's raw bytes.

    ``image`` must expose ``tobytes()`` (PIL Image or numpy array).
    MD5 is used only as a short fingerprint for test comparison, not security.
    """
    # Fixes: `hashlib.mda` is not a hashlib attribute (was `md5`), and the
    # digest object / parameter bindings had been mangled by obfuscation.
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()[:10]
def __UpperCamelCase ( image ) -> Dict:
    """Summarize a mask image as a short, comparable dict of hash + shape.

    NOTE(review): ``hashimage`` is the fingerprint helper defined just above
    in this file (also renamed ``__UpperCamelCase`` by obfuscation).
    """
    # Fix: the numpy array and shape bindings had been mangled (`npimg` was
    # referenced without being assigned); restored.
    npimg = np.array(image )
    shape = npimg.shape
    return {"hash": hashimage(image ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Pipeline tests for mask-generation (SAM); masks are compared by
    hash/shape fingerprints rather than raw pixels.

    NOTE(review): several method signatures declare ``lowerCamelCase__``
    repeatedly (SyntaxError from obfuscation), and the accumulator for
    mask summaries is bound to an ``A`` placeholder while the loops append
    to ``new_outupt`` (sic) — confirm names against the original test file.
    """

    # Model mappings under test (empty when the backend is unavailable).
    __lowerCamelCase : int = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    __lowerCamelCase : Union[str, Any] = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )

    def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
        """Build the pipeline plus example images for the common pipeline test harness."""
        A : Union[str, Any] = MaskGenerationPipeline(model=lowerCamelCase__, image_processor=lowerCamelCase__ )
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ):
        """Common run-test hook: intentionally a no-op for this pipeline."""
        pass

    @require_tf
    @unittest.skip("""Image segmentation not implemented in TF""" )
    def _lowerCAmelCase ( self ):
        """TF variant is skipped — not implemented."""
        pass

    @slow
    @require_torch
    def _lowerCAmelCase ( self ):
        """End-to-end SAM run; compares fingerprints of all returned masks."""
        A : Tuple = pipeline("""mask-generation""", model="""facebook/sam-vit-huge""" )
        A : Dict = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""", points_per_batch=256 )
        # Shortening by hashing
        A : Union[str, Any] = []
        for i, o in enumerate(outputs["""masks"""] ):
            new_outupt += [{"mask": mask_to_test_readable(lowerCamelCase__ ), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(lowerCamelCase__, decimals=4 ), [
                {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444},
                {"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.021},
                {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167},
                {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132},
                {"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053},
                {"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9967},
                {"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.993},
                {"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9909},
                {"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9879},
                {"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, """scores""": 0.9834},
                {"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9716},
                {"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9612},
                {"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9599},
                {"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9552},
                {"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9532},
                {"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9516},
                {"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9499},
                {"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9483},
                {"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9464},
                {"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.943},
                {"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.943},
                {"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9408},
                {"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9335},
                {"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9326},
                {"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9262},
                {"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8999},
                {"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8986},
                {"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8984},
                {"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8873},
                {"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8871}
            ], )
        # fmt: on

    @require_torch
    @slow
    def _lowerCAmelCase ( self ):
        """Thresholded run (pred_iou_thresh=1): only the top masks survive."""
        A : Union[str, Any] = """facebook/sam-vit-huge"""
        A : int = pipeline("""mask-generation""", model=lowerCamelCase__ )
        A : int = image_segmenter(
            """http://images.cocodataset.org/val2017/000000039769.jpg""", pred_iou_thresh=1, points_per_batch=256 )
        # Shortening by hashing
        A : Tuple = []
        for i, o in enumerate(outputs["""masks"""] ):
            new_outupt += [{"mask": mask_to_test_readable(lowerCamelCase__ ), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(lowerCamelCase__, decimals=4 ), [
                {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444},
                {"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0210},
                {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167},
                {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132},
                {"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053},
            ], )
| 520 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
_UpperCAmelCase : Dict = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    """Image processor for MobileViT: shortest-edge resize, center crop, rescale and
    an optional RGB->BGR channel flip (the pretrained checkpoints expect BGR input).
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Default: resize shortest edge to 224, then center-crop to 256x256.
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge matches `size["shortest_edge"]`, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}')
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        """Flip the channel order (RGB -> BGR) of an image."""
        return flip_channel_order(image, data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess an image or batch of images; per-call arguments override the defaults set in `__init__`."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Convert model logits into per-image semantic segmentation maps, optionally
        resized (bilinear) to the given `target_sizes`."""
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
'''simple docstring'''
import inspect
import re

from transformers.utils import direct_transformers_import

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes whose docstrings are allowed to lack a checkpoint link.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}
def get_checkpoint_from_config_class(config_class):
    """Return the first checkpoint name found in `config_class`'s docstring whose
    markdown link actually points at `https://huggingface.co/<checkpoint>`, or None.
    """
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
def check_config_docstrings_have_checkpoints():
    """Raise ValueError listing every (non-deprecated, non-ignored) config class whose
    docstring contains no valid checkpoint link."""
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
# Entry point: fails with a ValueError if any checked config class lacks a checkpoint link.
if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
# (merge artifact removed)
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """Fourth-order Improved Pseudo Linear Multistep (IPNDM) scheduler.

    For more information on the algorithm please take a look at the paper:
    https://arxiv.org/pdf/2202.09778.pdf — mainly formulas (9), (12), (13) and Algorithm 2.
    """

    # This scheduler consumes one model output per step.
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas=None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        self.pndm_order = 4

        # running values (history of model outputs used by the multistep formula)
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device=None):
        """Set the discrete timesteps used for the diffusion chain (to be run before inference)."""
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        # NOTE: the original code called the non-existent `torch.atana`; `torch.atan2` is the
        # intended two-argument arctangent.
        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(self, model_output, timestep, sample, return_dict: bool = True):
        """Propagate the sample one step back in diffusion time using the linear multistep formula.

        Returns a `SchedulerOutput` (or a plain tuple when `return_dict=False`).
        """
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # Adams–Bashforth style coefficients, order grows with the available history.
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        """IPNDM needs no input scaling; return the sample unchanged (kept for API interchangeability)."""
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        """Compute the previous sample from the current sample and the combined model output."""
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        # guard against division by ~0 at the final step
        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
# (merge artifact removed)
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
# `tabulate` format that renders as a Slack/GitHub-style markdown table.
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)

failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f'''🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results''',
            "emoji": True,
        },
    }
]

# Parse every pytest JSON-lines log in the working directory and collect failures.
total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f'''{line["duration"]:.4f}'''
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_filesafailed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split("::")
                # keep only the file name, not the full path
                data[0] = data[0].split("/")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3000:
        # Slack section blocks are limited to ~3000 characters; truncate with a notice.
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f'''\n...\n```\n{err}'''
    print(f'''### {message}''')
else:
    message = "No failed tests! 🤗"
    print(f'''## {message}''')
    payload.append(no_error_payload)

# Post to Slack only when running inside CI (TEST_TYPE is set).
if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f'''https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
            },
        }
        payload.append(action_button)
        date_report = {
            "type": "context",
            "elements": [
                {
                    "type": "plain_text",
                    "text": f'''Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}''',
                }
            ],
        }
        payload.append(date_report)
        response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
        ts = response.data["ts"]
        # Post per-file failure details as threaded replies.
        for failed_file in all_filesafailed:
            for test_location, test_failures in failed_file.items():
                # Keep only the first instance of the test name
                test_class = ""
                for i, row in enumerate(test_failures):
                    if row[0] != test_class:
                        test_class = row[0]
                    else:
                        row[0] = ""
                payload = {
                    "type": "section",
                    "text": {
                        "type": "mrkdwn",
                        "text": f'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```''',
                    },
                }
                client.chat_postMessage(
                    channel="#accelerate-ci-daily",
                    thread_ts=ts,
                    blocks=[payload],
                )
# (merge artifact removed)
'''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    """Minimal LightningModule wrapper: a Longformer encoder plus a 2-label QA head.

    Only used to re-load a PyTorch Lightning checkpoint so its weights can be
    transferred into a `LongformerForQuestionAnswering` model.
    """

    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    def forward(self):
        # A forward pass is not needed for checkpoint conversion.
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    """Convert a PyTorch Lightning Longformer-QA checkpoint into a 🤗 Transformers checkpoint.

    Args:
        longformer_model: model identifier of the Longformer backbone.
        longformer_question_answering_ckpt_path: path to the official PyTorch Lightning checkpoint.
        pytorch_dump_folder_path: output folder for the converted model.
    """
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""")
if __name__ == "__main__":
    # Command-line entry point for the conversion script.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--longformer_model',
        default=None,
        type=str,
        required=True,
        help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
    )
    parser.add_argument(
        '--longformer_question_answering_ckpt_path',
        default=None,
        type=str,
        required=True,
        help='Path the official PyTorch Lightning Checkpoint.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')

require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')

# Model types supported for masked image modeling (used to validate --model_type).
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6,
        metadata={"help": "Percentage of patches to mask."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        # Build the `data_files` mapping consumed by `datasets.load_dataset`.
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )
class MaskGenerator:
    """Generates SimMIM-style boolean patch masks.

    A random subset of `mask_patch_size` x `mask_patch_size` patches covering
    `mask_ratio` of the image is masked; the mask is then expanded to the model's
    `model_patch_size` resolution and returned flattened as a torch tensor.
    """

    def __init__(self, input_size: int = 192, mask_patch_size: int = 32, model_patch_size: int = 4, mask_ratio: float = 0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError('Input size must be divisible by mask patch size')
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError('Mask patch size must be divisible by model patch size')

        # number of mask patches along one side, and expansion factor to model patches
        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        # expand each mask patch to model-patch resolution
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
def collate_fn(examples):
    """Stack per-example pixel values and boolean masks into a model-ready batch dict."""
    pixel_values = torch.stack([example['pixel_values'] for example in examples])
    mask = torch.stack([example['mask'] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    """Run SimMIM-style masked image modeling pretraining end to end."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config: fall back to the configuration's values when not given on the CLI
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        """Apply the image transforms and attach one fresh random mask per image."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
# Script entry point.
if __name__ == "__main__":
    main()
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
# Fixture data: the mocked URL, its body, and the sha256 of the URL used as cache file name.
URL = '''http://www.mocksite.com/file1.txt'''
CONTENT = '''\"text\": [\"foo\", \"foo\"]'''
HASH = '''6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'''


class MockResponse:
    # Minimal stand-in for `requests.Response` as consumed by the download manager.
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    # Replacement for `requests.request` so no real network call is made.
    return MockResponse()


@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager(monkeypatch, urls_type, tmp_path):
    """DownloadManager should download (mocked) URLs given as str, list or dict,
    cache them under <cache_dir>/downloads/<hash> and write .json metadata."""
    import requests

    monkeypatch.setattr(requests, "request", mock_request)
    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    # Normalize every input shape to parallel lists of (path, url).
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            # use_etag=False, so only the source URL is recorded.
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    """Extract a compressed fixture file (given as str, list or dict of paths) and verify
    the extraction cache layout and the extracted content."""
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,  # NOTE(review): extraction hash below uses etag=None — confirm flag value
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    # Normalize the three container shapes (str / list / dict) into parallel lists.
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            # Extracted file name is the hash of the source path, under the "extracted" subdir.
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    """Helper: check that `file` (an iterable of raw JSONL byte lines from an archive member
    named `path`) holds exactly 4 records with the expected columns."""
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    """Iterate a (tar/zip) archive of JSONL files by path and validate each member."""
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    # Both archive fixtures are expected to contain exactly two JSONL members.
    assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    """Iterate an archive that itself contains an archive of JSONL files (nested iteration)."""
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        # The inner member is itself an archive: iterate it again from the open file object.
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2
def test_iter_files(data_dir_with_hidden_files):
    """`iter_files` yields only the visible files of a directory, in a deterministic order."""
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    # Hidden files in the fixture directory must be skipped, leaving exactly two entries.
    assert num_file == 2
| 701 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
# NOTE(review): unused module-level flag — presumably a switch to enable the fast
# pipeline tests below; confirm intent before removing.
a : Optional[Any] = False
# Placeholder test case: fast (CPU) tests for this pipeline are intentionally empty here;
# the real coverage lives in the nightly GPU test class that follows.
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    """Nightly GPU integration tests for ``VersatileDiffusionTextToImagePipeline``."""

    def tearDown(self):
        # Free GPU memory between tests so later tests don't OOM.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        """Removing the unused text UNet, then save/load round-tripping, must not change outputs."""
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # Re-seed so both runs start from the same noise.
        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_text2img(self):
        """fp16 inference reproduces a known image slice within tolerance."""
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 680 | 0 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
# Shared RNG so repeated helper calls keep drawing from one deterministic-seedable stream.
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a nested list of random floats with the given 2-D shape.

    Args:
        shape: pair ``(num_rows, num_cols)``.
        scale: multiplier applied to each uniform ``[0, 1)`` sample.
        rng: optional ``random.Random``; falls back to the module-level ``global_rng``.
        name: unused; kept for signature compatibility with sibling test helpers.

    Returns:
        list[list[float]] of ``shape[0]`` rows by ``shape[1]`` columns.
    """
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__( self: List[str] , a: Dict , a: Dict=7 , a: List[str]=4_00 , a: Any=20_00 , a: Tuple=24 , a: Optional[int]=24 , a: Optional[Any]=0.0 , a: int=1_60_00 , a: Optional[int]=True , a: int=True , ) ->Optional[int]:
'''simple docstring'''
a_ = parent
a_ = batch_size
a_ = min_seq_length
a_ = max_seq_length
a_ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
a_ = feature_size
a_ = num_mel_bins
a_ = padding_value
a_ = sampling_rate
a_ = return_attention_mask
a_ = do_normalize
def _lowerCAmelCase ( self: Optional[Any]) ->Tuple:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _lowerCAmelCase ( self: Optional[int] , a: Optional[int]=False , a: List[str]=False) ->Tuple:
'''simple docstring'''
def _flatten(a: List[Any]):
return list(itertools.chain(*a))
if equal_length:
a_ = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
else:
# make sure that inputs increase in size
a_ = [
floats_list((x, self.feature_size))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
a_ = [np.asarray(a) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    """Speech2Text feature-extractor tests: shared mixin checks plus model-specific ones."""

    feature_extraction_class = SpeechaTextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = SpeechaTextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        """Assert the (time, feature) matrix is normalized per feature dimension."""
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            # Padded region must stay (near) zero.
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="max_length",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])

    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=16,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            # Padding must down-cast double-precision input to float32 in both frameworks.
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        """Load the first `num_samples` LibriSpeech dummy clips (decoded to float arrays)."""
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        # Compare only as many leading features as the stored fixture holds (24 values).
        self.assertTrue(np.allclose(input_features[0, 0, : len(expected)], expected, atol=1e-4))
| 685 |
'''simple docstring'''
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    """Builds small YOLOS configs/inputs and runs shape checks on behalf of the test class."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],  # NOTE(review): mutable default kept for upstream signature parity
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        # Forward both with and without labels; with labels a loss must be produced.
        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for YOLOS, with detection-specific label handling."""

    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )
    # NOTE(review): flag names inferred from the common-test mixin conventions — confirm.
    is_training = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        # Detection models need per-example dicts of class labels and boxes, not a tensor.
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check of the released yolos-small checkpoint on a fixture image."""

    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
| 634 | 0 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
# Public aliases used by both distance implementations below.
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Return the Euclidean distance between two equal-length vectors, using NumPy.

    >>> float(euclidean_distance([0, 0], [3, 4]))
    5.0
    """
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))
def euclidean_distance_no_np(vector_1: "Vector", vector_2: "Vector") -> "VectorOut":
    """Return the Euclidean distance between two equal-length vectors, in pure Python.

    >>> euclidean_distance_no_np([0, 0], [3, 4])
    5.0
    """
    return sum((va - vb) ** 2 for va, vb in zip(vector_1, vector_2)) ** (1 / 2)
if __name__ == "__main__":

    def benchmark() -> None:
        """Compare the pure-Python and NumPy implementations with ``timeit``."""
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )

    benchmark()
| 720 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module logger, following the transformers convention of one logger per module.
logger = logging.get_logger(__name__)

# Map of released checkpoints to their hosted config files.
NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for NAT (Neighborhood Attention Transformer) models.

    Stores architecture hyper-parameters (patching, stage depths/heads, attention
    kernel size, dropout rates, ...) and the backbone stage bookkeeping needed to use
    the model as a feature backbone.
    """

    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 501 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
# NOTE(review): the second statement rebinds the same obfuscated name, so the
# logger object is immediately shadowed by the URL map — confirm intended names.
__lowerCamelCase : int = logging.get_logger(__name__)
# Checkpoint name -> hosted config.json URL for pretrained ViT-MSN models.
__lowerCamelCase : Any = {
    '''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class a__ ( PretrainedConfig ):
    r"""Configuration for a ViT-MSN style vision transformer encoder.

    NOTE(review): the original class inherited from an undefined name, declared
    all ``__init__`` parameters with the same name (a SyntaxError), and stored
    the values in a throwaway local instead of on ``self``; repaired here.
    """

    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        """Build the config; unknown kwargs are forwarded to PretrainedConfig."""
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
# Error message raised for malformed Spanish national ID input.
# Bug fix: both constants were bound to the same obfuscated name (the second
# shadowed the first) while the validator below reads LOOKUP_LETTERS.
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
# Checksum letters, indexed by (8-digit id number % 23).
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"
def _snake_case ( spanish_id : str ) -> bool:
    """
    Validate a Spanish national identity number (8 digits plus checksum letter).

    Dashes are ignored and the letter comparison is case-insensitive.

    >>> _snake_case("12345678Z")
    True
    >>> _snake_case("12345678-Z")
    True
    >>> _snake_case("12345678A")
    False

    Raises:
        TypeError: if the input is not a string.
        ValueError: if the cleaned input is not 8 digits plus one letter.
    """
    # Local copies so this function stands alone even if the module-level
    # constants are absent (the original referenced an undefined name).
    error_message = "Input must be a string of 8 numbers plus letter"
    lookup_letters = "TRWAGMYFPDXBNJZSQVHLCKE"

    if not isinstance(spanish_id, str):
        # Bug fix: the original tested isinstance(x, x) — a TypeError on any
        # non-type argument — and raised the input itself instead of a message.
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(error_message)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(error_message) from ex

    if letter.isdigit():
        raise ValueError(error_message)

    # The checksum letter is the 8-digit number modulo 23, indexed into the
    # official lookup table.
    return letter == lookup_letters[number % 23]
if __name__ == "__main__":
    # Run this module's doctests when it is executed as a script.
    from doctest import testmod

    testmod()
| 216 | 1 |
import json
import sys
def SCREAMING_SNAKE_CASE_ ( input_json_file , output_md_file ):
    """Render a benchmark-results JSON file as a collapsible Markdown report.

    The input maps ``benchmark_name -> {metric_name -> {"new": x, "old": y,
    "diff": z}}`` ("old" and "diff" optional).  One Markdown table is emitted
    per benchmark, sorted by name.

    Bug fix: the original declared both parameters with the same name, which
    is a SyntaxError; they are the input JSON path and the output MD path.
    """
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        # Use only the file name, not the full repo path, as the heading.
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        # Build the three table rows (header, separator, values) in lockstep.
        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            # Non-numeric entries are rendered literally as "None".
            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
if __name__ == "__main__":
    # CLI: python <script> <input_json_file> <output_md_file>
    # Bug fix: the original bound both argv values to one shadowed name and
    # called a function name that is never defined in this module.
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
    SCREAMING_SNAKE_CASE_(input_json_file, output_md_file)
| 462 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
# NOTE(review): the second statement rebinds the same obfuscated name, so the
# logger object is immediately shadowed by the URL map — confirm intended names.
lowerCamelCase =logging.get_logger(__name__)
# Checkpoint name -> hosted config.json URL for pretrained REALM models.
lowerCamelCase ={
    "google/realm-cc-news-pretrained-embedder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-encoder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-scorer": (
        "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-openqa": (
        # NOTE(review): "aresolve" looks like a typo for "resolve" — verify
        # against the hub before changing, since this string is used at runtime.
        "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
    ),
    "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
    "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
    "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
    "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
    # See all REALM models at https://huggingface.co/models?filter=realm
}
class _lowerCamelCase ( PretrainedConfig ):
    r"""Configuration for REALM models (embedder, scorer, reader, open-QA).

    NOTE(review): the original class inherited from an undefined name, declared
    every ``__init__`` parameter with the same name (a SyntaxError), and stored
    values in a throwaway local instead of on ``self``; all repaired here.
    """

    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """Build the config; special token ids are forwarded to the base class."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 462 | 1 |
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
# Configure root logging once at import time so every distillation utility in
# this module shares the same message format and INFO level.
logging.basicConfig(
    format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
    datefmt='''%m/%d/%Y %H:%M:%S''',
    level=logging.INFO,
)
# Module-level logger used by the helpers below.
__magic_name__ = logging.getLogger(__name__)
def __lowerCamelCase ( UpperCamelCase__ ):
    """Serialize the current git repo state (id, sha, branch) to
    ``<UpperCamelCase__>/git_log.json`` for experiment reproducibility.

    UpperCamelCase__: path of the dump folder the JSON file is written into.
    """
    dump_folder_path = UpperCamelCase__

    # Search upward from the CWD for the enclosing repository.
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(dump_folder_path, "git_log.json"), "w") as f:
        # Bug fix: the original passed the folder-path argument as both the
        # object and the stream to json.dump; the collected repo metadata is
        # what must be written.
        json.dump(repo_infos, f, indent=4)
def __lowerCamelCase ( UpperCamelCase__ ):
    """Initialize GPU / distributed settings on the given namespace, in place.

    UpperCamelCase__: argparse-style namespace with at least ``n_gpu`` and
    ``local_rank``; on multi-GPU runs the usual torch.distributed environment
    variables (WORLD_SIZE, N_GPU_NODE, RANK, N_NODES, NODE_RANK) must be set.
    """
    # Bug fix: the original body read an undefined name `params` and stored
    # every result in a throwaway local instead of on the namespace.
    params = UpperCamelCase__

    if params.n_gpu <= 0:
        # CPU-only run: fill in inert defaults and stop.
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID : %i" % params.node_id)
    logger.info(PREFIX + "Local rank : %i" % params.local_rank)
    logger.info(PREFIX + "World size : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )
def __lowerCamelCase ( UpperCamelCase__ ):
    """Seed numpy and torch RNGs (and CUDA RNGs when GPUs are in use).

    UpperCamelCase__: namespace with ``seed`` (int) and ``n_gpu`` (int).
    """
    # Bug fix: the body referenced an undefined name `args`.
    args = UpperCamelCase__
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
| 657 |
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _lowerCAmelCase ( unittest.TestCase ):
    """Pipeline tests for the fill-mask task.

    NOTE(review): every method of this class was defined under the same
    obfuscated name `_a`, so all but the last were shadowed and the
    ``self.run_*`` helper calls resolved to nothing; canonical method and
    parameter names are restored below.  All literal expectations (scores,
    tokens, sequences) are unchanged.
    """

    # Mappings used by the pipeline test harness to enumerate architectures.
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()

    @require_tf
    def test_small_model_tf(self):
        # Tiny TF checkpoint: assert exact top-2 predictions for fixed prompts.
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        # Tiny PT checkpoint: exact predictions, including the multi-mask case.
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )

    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)

    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)

    def run_large_test(self, unmasker):
        # Shared expectations for the full distilroberta-base checkpoint.
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )

    @require_torch
    def test_model_no_pad_pt(self):
        # The pipeline must work even when the tokenizer has no pad token.
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    def get_test_pipeline(self, model, tokenizer, processor):
        # Harness hook: build a FillMaskPipeline plus example inputs.
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples

    def run_pipeline_test(self, fill_masker, examples):
        # Structural checks: outputs are lists of {sequence, score, token,
        # token_str} dicts, for single inputs, lists, and batches.
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)

    def run_test_targets(self, model, tokenizer):
        # `targets` restricts predictions, whether given at pipeline
        # construction or at call time, and scores must match either way.
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")

    def run_test_top_k(self, model, tokenizer):
        # `top_k` limits outputs, whether given at construction or call time.
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)

    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
| 657 | 1 |
# Sequence a(1)=1, a(i) = a(i-1) + digitsum(a(i-1)); find the n-th term.
# Bug fix: the three constants below were all bound to one shadowed obfuscated
# name while the functions read `ks`, `base`, `memo`; likewise all four
# functions shared one name while calling each other as `next_term`, `compute`
# and `add`.  The real names are restored throughout.
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
# memo[digitsum(b)][c] -> list of cached (diff, dn, k) jumps.
memo: dict[int, dict[int, list[list[int]]]] = {}


def next_term(a_i, k, i, n):
    """Advance the digit array `a_i` (in place) from term i toward term n.

    Terms are written as a(i) = b * 10^k + c; cached "jumps" keyed on
    (digitsum(b), c) let us skip many sequential terms at once.
    Returns (diff, dn): total value added and number of terms advanced.
    """
    # ds_b = digitsum(b), c = low-order value below 10^k.
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)
    if sub_memo is not None:
        jumps = sub_memo.get(c)
        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    """Compute terms sequentially, mutating `a_i` in place, until either term
    n is reached or the carry spills past 10^k.  Returns (diff, terms_done)."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]
        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    """Add `addend` into the digit array `digits` starting at index k,
    propagating carries and appending new high-order digits as needed."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """Return the n-th term of the digit-sum sequence.

    >>> solution(10)
    62
    """
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    # Digits are stored little-endian; reassemble the integer.
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"{solution() = }")
| 365 |
# Bug fix: all three constants were bound to one shadowed obfuscated name
# while the code below reads `ks`, `base` and `memo`.
ks = range(2, 20 + 1)
# base[k] = 10**k, precomputed up to the largest k used.
base = [10**k for k in range(ks[-1] + 1)]
# memo[digitsum(b)][c] -> list of cached (diff, dn, k) jumps.
memo: dict[int, dict[int, list[list[int]]]] = {}
def next_term(a_i, k, i, n):
    """
    Jump the sequence forward from term i towards term n using memoized jumps.

    The current term is viewed as b * 10^k + c; jumps are cached in the
    module-level `memo` keyed by digitsum(b) and c.  `a_i` (digits,
    least-significant first) is updated in place.

    Returns (diff, dn): total amount added to the term and number of sequence
    steps jumped.  Falls back to next_term at k-1, or to compute() for the
    smallest block size, and caches the newly discovered jump.
    """
    # split the term as  a_i -> b * 10^k + c
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))  # digitsum(b)
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)
def compute(a_i, k, i, n):
    """
    Advance the sequence a(m+1) = a(m) + digitsum(a(m)) term by term.

    `a_i` holds the digits of the current term least-significant first and is
    updated in place.  Only the low `k` digits are recomputed per step; a
    carry out of them ends the run.

    Returns (diff, steps): the total amount added to the term and the number
    of sequence steps actually taken (stops early on a carry past digit k).
    """
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]
        # a carry out of the low k digits changes b, so stop and fix up below
        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, k, addend):
    """
    Add `addend` into the digit array `digits`, starting at digit index `k`.

    `digits` is least-significant-first; new digits are appended when the
    carry overflows the current length.  Mutates `digits` in place.
    """
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break

    # spill any remaining carry into new high-order digits
    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n: int = 10**15) -> int:
    """
    Return the n-th term of the sequence a(1)=1, a(k+1)=a(k)+digitsum(a(k)).

    Relies on next_term() (and the module-level memo it maintains) to jump
    over large runs of terms.  Digits are kept least-significant first and
    converted back to an int at the end.
    """
    digits = [1]
    i = 1
    dn = 0  # number of terms already jumped over
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
# Script entry point: prints the computed term using f-string debug syntax.
if __name__ == "__main__":
    print(f'{solution() = }')
'''simple docstring'''
def compute_ap(l):  # noqa: E741
    """
    Find and print the articulation points (cut vertices) of an undirected
    graph given as an adjacency list `l` (neighbor lists indexed 0..n-1),
    using a DFS low-link computation.  Prints one vertex index per line.
    """
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        # count tree edges leaving the DFS root (root is an AP iff > 1)
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            # root rule overrides whatever the low-link checks set for i
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
'''simple docstring'''
def binary_and(a: int, b: int):
    """
    Return the bitwise AND of two non-negative integers as a binary string.

    The result is zero-padded to the width of the wider operand.

    >>> binary_and(25, 32)
    '0b000000'
    >>> binary_and(37, 50)
    '0b100000'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
# Run the docstring examples as tests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    """
    Output of the Roberta-series text encoder: the projected hidden states
    plus the usual encoder outputs (all tensors optional).
    """

    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig):
    """
    XLM-RoBERTa config extended with a projection head size (`project_dim`)
    and pooling/encoder-training options used by the Roberta-series model.
    """

    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    """
    XLM-RoBERTa encoder with a linear transformation head that projects the
    encoder output to `config.project_dim` (used as a CLIP-style text encoder).
    """

    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        # When set on the config, project the second-to-last layer instead of the final one.
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """Run the encoder and return a TransformationModelOutput with the projected states."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            # the pre-transformation path needs the second-to-last hidden state
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )

        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Per-device train batch cap and the fixed evaluation batch size.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Build train/eval `DataLoader`s for GLUE MRPC tokenized with bert-base-cased.

    Args:
        accelerator: the `Accelerator`; its distributed setup drives padding decisions.
        batch_size: batch size for the train dataloader.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=32  # EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only: swap in the lightweight mocked dataloaders from accelerate's test utils.
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """
    Fine-tune bert-base-cased on GLUE MRPC using Accelerate with LocalSGD.

    Args:
        config: dict with "lr", "num_epochs", "seed", "batch_size".
        args: parsed CLI options from main() (cpu, mixed_precision,
            gradient_accumulation_steps, local_sgd_steps).
    """
    # For testing only: shrink the run when the dataloaders are mocked.
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs)
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                # LocalSGD-specific line
                local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    """CLI entry point: parse arguments and launch the training run."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
# Script entry point.
if __name__ == "__main__":
    main()
| 183 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    """Tool wrapper around a BLIP image-captioning checkpoint."""

    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    # NOTE(review): name follows this file's import; upstream spells it AutoModelForVision2Seq.
    model_class = AutoModelForVisionaSeq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # Image handling requires the vision backend (Pillow).
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        """Preprocess the input image into model tensors."""
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        """Generate caption token ids."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode generated ids into a clean caption string."""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    # Minimal handler used to exercise KwargsHandler.to_kwargs():
    # only fields changed from their defaults should be emitted.
    a: int = 0
    b: bool = False
    c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
    """Unit tests for accelerate's kwargs handlers."""

    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        # Re-launch this very file under torchrun; the __main__ block below performs the checks.
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    # Executed under torchrun by test_ddp_kwargs: verify that DDP kwargs were applied.
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    """
    A polynomial in a single indeterminate, stored as a coefficient list
    `coefficients[i]` for the x**i term (length == degree + 1).
    """

    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        """Add two polynomials, padding to the larger degree."""
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        """Subtract by adding the product with the constant polynomial -1."""
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        """Convolution of the coefficient lists."""
        coefficients = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: float) -> float:
        """Evaluate the polynomial at `substitution`."""
        result = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        """Human-readable form, highest power first; zero terms are skipped."""
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        """Return the derivative (degree lowered by one)."""
        coefficients = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: float = 0) -> Polynomial:
        """Return the antiderivative with the given constant of integration."""
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
def least_divisible_repunit(divisor: int) -> int:
    """
    Return the least k such that the repunit R(k) = (10**k - 1) / 9 is
    divisible by `divisor`, or 0 when no repunit is (i.e. when `divisor`
    shares a factor with 10).

    >>> least_divisible_repunit(7)
    6
    """
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        # build R(k+1) mod divisor incrementally; loop ends when it hits 0
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
def solution(limit: int = 100_0000) -> int:
    """
    Project Euler 129: find the least odd divisor coprime to 10 whose
    least divisible repunit length A(divisor) first exceeds `limit`.

    Only odd candidates are tried, since even divisors (and multiples of 5)
    never divide a repunit.
    """
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
# Script entry point: prints the answer using f-string debug syntax.
if __name__ == "__main__":
    print(f"""{solution() = }""")
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def parse_args():
    """Parse CLI options for the INC-optimized Stable Diffusion generation script."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c",
        "--caption",
        type=str,
        default="robotic cat with wings",
        help="Text used to generate images.",
    )
    parser.add_argument(
        "-n",
        "--images_num",
        type=int,
        default=4,
        help="How much images to generate.",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=42,
        help="Seed for random process.",
    )
    parser.add_argument(
        "-ci",
        "--cuda_id",
        type=int,
        default=0,
        help="cuda_id.",
    )
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols):
    """
    Paste `rows * cols` equally-sized PIL images into a single grid image.

    Raises ValueError when the number of images does not fill the grid.
    """
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        # fill the grid row by row
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    """Run the pipeline with a fixed seed and lay the images out in a square-ish grid."""
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
# NOTE(review): "UNetaDConditionModel" follows this file's import; upstream spells it UNet2DConditionModel.
unet = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")

pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# Disable the safety checker (returns the images unchanged).
pipeline.safety_checker = lambda images, clip_input: (images, False)

# Prefer the Intel Neural Compressor optimized UNet when a tuned checkpoint exists.
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)

grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
# Save the combined grid next to the model, then each image in a caption-named folder.
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
def binary_exponentiation(a, n, mod):
    """
    Compute (a ** n) % mod with O(log n) multiplications by squaring.

    >>> binary_exponentiation(3, 4, 5)
    1
    """
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        # integer division (was n / 2): keeps n an int instead of drifting to float
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
# a prime number
p = 701

a = 10_0000_0000
b = 10

# using binary exponentiation function, O(log(p)):
# modular division a/b mod p via Fermat's little theorem (b^(p-2) is b's inverse mod p)
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)

# same identity using the ** operator (much larger intermediate values)
print((a / b) % p == (a * b ** (p - 2)) % p)
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """
    Decode an audio byte payload with ffmpeg into a mono float32 numpy array
    resampled at `sampling_rate`.

    Raises:
        ValueError: when the ffmpeg binary is missing or no samples were decoded.
    """
    ar = f"{sampling_rate}"
    ac = "1"  # mono
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def ffmpeg_microphone(sampling_rate: int, chunk_length_s: float, format_for_conversion: str = "f32le"):
    """
    Yield raw microphone audio chunks captured via ffmpeg.

    Chooses the platform capture backend (alsa / avfoundation / dshow) and
    streams mono audio at `sampling_rate`, in chunks of `chunk_length_s`
    seconds, encoded as `format_for_conversion` ("s16le" or "f32le").
    """
    ar = f"{sampling_rate}"
    ac = "1"  # mono
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def lowerCAmelCase__(sampling_rate, chunk_length_s, stream_chunk_s=None, stride_length_s=None, format_for_conversion="f32le"):
    """Stream microphone audio as overlapping numpy chunks suitable for ASR pipelines.

    Fixes over the previous revision: duplicate ``a__`` parameter names (a
    SyntaxError) and mangled assignment targets, which in particular dropped the
    writes back into ``item`` (``item["raw"]``, ``item["stride"]``,
    ``item["sampling_rate"]``) so callers never received the numpy conversion.

    Args:
        sampling_rate: capture sampling rate in Hz.
        chunk_length_s: length in seconds of each full chunk.
        stream_chunk_s: if set, how often (seconds) partial chunks are emitted.
        stride_length_s: overlap in seconds, a single number or ``[left, right]``;
            defaults to ``chunk_length_s / 6`` on both sides.
        format_for_conversion: raw PCM format, ``"s16le"`` or ``"f32le"``.

    Yields:
        dicts with ``"raw"`` (np.ndarray), ``"stride"`` (left, right) in samples,
        ``"sampling_rate"``, and a ``"partial"`` flag from the chunker.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`')
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def lowerCAmelCase__(iterator, chunk_len, stride, stream=False):
    """Re-chunk an iterator of bytes into fixed-size, overlapping chunks.

    Fixes over the previous revision: the signature declared four parameters all
    named ``a__`` (a SyntaxError), and every assignment target was mangled so
    ``acc``, ``stride_left``, ``_stride_left`` and ``item`` were never defined.

    Args:
        iterator: iterable yielding ``bytes`` of arbitrary length.
        chunk_len: size in bytes of each emitted chunk.
        stride: ``(stride_left, stride_right)`` overlap, in bytes, kept between
            consecutive chunks.
        stream: when True, also emit incomplete chunks as they accumulate,
            tagged with a ``"partial"`` flag.

    Yields:
        dicts ``{"raw": bytes, "stride": (left, right)}`` (plus ``"partial"`` in
        streaming mode). The first chunk has left stride 0, the last right stride 0.

    Raises:
        ValueError: if the combined stride is not strictly smaller than
            ``chunk_len`` (iteration would never advance).
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f'Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}')
    # The very first chunk has no left context yet.
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                # Keep the overlap region for the next chunk.
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk: emit whatever remains beyond the already-yielded left stride.
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def lowerCAmelCase__(ffmpeg_command, buflen):
    """Run a command and lazily yield fixed-size chunks of its stdout.

    Fixes over the previous revision: the signature declared two parameters both
    named ``a__`` (a SyntaxError) and the mangled assignments left ``raw`` and
    the ``bufsize`` value undefined at use.

    Args:
        ffmpeg_command: argv list for the process to spawn (typically ffmpeg).
        buflen: number of bytes to read per yielded chunk.

    Yields:
        ``bytes`` chunks of at most ``buflen`` bytes until stdout is exhausted.

    Raises:
        ValueError: if the executable (ffmpeg) is not installed.
    """
    bufsize = 2**24  # 16Mo pipe buffer
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    # EOF: the process closed its stdout.
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
# Module-level logger. The methods below refer to it as `logger` (see the
# `logger.warning(...)` calls), but the previous revision only bound the name
# `lowerCamelCase__`, leaving `logger` undefined at call time. Bind both names
# so existing references keep working.
logger = logging.get_logger(__name__)
lowerCamelCase__ = logger
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , lowercase_ : str = None , lowercase_ : uuid.UUID = None , lowercase_ : List[Any]=None , lowercase_ : int=None) -> Dict:
"""simple docstring"""
if not conversation_id:
_UpperCamelCase = uuid.uuida()
if past_user_inputs is None:
_UpperCamelCase = []
if generated_responses is None:
_UpperCamelCase = []
_UpperCamelCase = conversation_id
_UpperCamelCase = past_user_inputs
_UpperCamelCase = generated_responses
_UpperCamelCase = text
def __eq__( self : Optional[Any] , lowercase_ : Optional[Any]) -> List[Any]:
"""simple docstring"""
if not isinstance(lowercase_ , lowercase_):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def __UpperCAmelCase ( self : List[Any] , lowercase_ : str , lowercase_ : bool = False) -> Any:
"""simple docstring"""
if self.new_user_input:
if overwrite:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
f'with: "{text}".')
_UpperCamelCase = text
else:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input')
else:
_UpperCamelCase = text
def __UpperCAmelCase ( self : Optional[int]) -> List[Any]:
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input)
_UpperCamelCase = None
def __UpperCAmelCase ( self : Dict , lowercase_ : str) -> Optional[Any]:
"""simple docstring"""
self.generated_responses.append(lowercase_)
def __UpperCAmelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : Union[str, Any]) -> int:
"""simple docstring"""
_UpperCamelCase = f'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
_UpperCamelCase = "user" if is_user else "bot"
output += f'{name} >> {text} \n'
return output
@add_end_docstrings(
    lowerCAmelCase, R'''
    min_length_for_response (`int`, *optional*, defaults to 32):
        The minimum length (in number of tokens) for a response.
    minimum_tokens (`int`, *optional*, defaults to 10):
        The minimum length of tokens to leave for a response.
    ''', )
class _UpperCAmelCase ( lowerCAmelCase ):
    """Multi-turn conversational pipeline: tokenizes a Conversation, generates a
    model response, and appends it back onto the Conversation.

    NOTE(review): this file has been mechanically renamed — assignment targets
    appear as `_UpperCamelCase` while later lines read the original variable
    names (`preprocess_params`, `input_ids`, `n`, `trim`, ...), so several
    assignments no longer reach their intended targets. The docstrings below
    describe the intended data flow; confirm against the un-mangled upstream
    source before relying on them.
    """
    def __init__( self : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : str) -> List[str]:
        """Build the pipeline; ensure the tokenizer can pad (EOS fallback)."""
        super().__init__(*lowercase_ , **lowercase_)
        if self.tokenizer.pad_token_id is None:
            # presumably meant to set `self.tokenizer.pad_token` — TODO confirm upstream
            _UpperCamelCase = self.tokenizer.eos_token
    def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Union[str, Any]=None , lowercase_ : int=None , lowercase_ : str=None , **lowercase_ : str) -> Tuple:
        """Split call kwargs into (preprocess_params, forward_params, postprocess_params)."""
        _UpperCamelCase = {}
        _UpperCamelCase = {}
        _UpperCamelCase = {}
        if min_length_for_response is not None:
            _UpperCamelCase = min_length_for_response
        if minimum_tokens is not None:
            _UpperCamelCase = minimum_tokens
        if "max_length" in generate_kwargs:
            _UpperCamelCase = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            _UpperCamelCase = clean_up_tokenization_spaces
        if generate_kwargs:
            # Remaining generate kwargs are forwarded to `model.generate`.
            forward_params.update(lowercase_)
        return preprocess_params, forward_params, postprocess_params
    def __call__( self : Any , lowercase_ : Union[Conversation, List[Conversation]] , lowercase_ : str=0 , **lowercase_ : Union[str, Any]) -> Union[str, Any]:
        """Run the pipeline; unwrap the list when a single Conversation was passed."""
        _UpperCamelCase = super().__call__(lowercase_ , num_workers=lowercase_ , **lowercase_)
        if isinstance(lowercase_ , lowercase_) and len(lowercase_) == 1:
            return outputs[0]
        return outputs
    def __UpperCAmelCase ( self : List[Any] , lowercase_ : Conversation , lowercase_ : Any=32) -> Dict[str, Any]:
        """Preprocess: validate the Conversation and tokenize it into framework tensors."""
        if not isinstance(lowercase_ , lowercase_):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f'Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. '
                "Add user inputs with the conversation's `add_user_input` method")
        if hasattr(self.tokenizer , "_build_conversation_input_ids"):
            _UpperCamelCase = self.tokenizer._build_conversation_input_ids(lowercase_)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            _UpperCamelCase = self._legacy_parse_and_tokenize(lowercase_)
        if self.framework == "pt":
            _UpperCamelCase = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            _UpperCamelCase = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}
    def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Any , lowercase_ : Optional[int]=10 , **lowercase_ : Dict) -> List[str]:
        """Forward: trim the prompt so at least `minimum_tokens` remain, then generate."""
        _UpperCamelCase = generate_kwargs.get("max_length" , self.model.config.max_length)
        _UpperCamelCase = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f'Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})')
            _UpperCamelCase = max_length - minimum_tokens
            _UpperCamelCase = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                _UpperCamelCase = model_inputs["attention_mask"][:, -trim:]
        _UpperCamelCase = model_inputs.pop("conversation")
        _UpperCamelCase = max_length
        _UpperCamelCase = self.model.generate(**lowercase_ , **lowercase_)
        if self.model.config.is_encoder_decoder:
            # Encoder-decoder outputs start after the single decoder start token.
            _UpperCamelCase = 1
        else:
            # Decoder-only outputs echo the prompt; skip its n tokens.
            _UpperCamelCase = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : int=True) -> List[Any]:
        """Postprocess: decode generated ids and append the response to the Conversation."""
        _UpperCamelCase = model_outputs["output_ids"]
        _UpperCamelCase = self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , )
        _UpperCamelCase = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(lowercase_)
        return conversation
    def __UpperCAmelCase ( self : Any , lowercase_ : Conversation) -> Dict:
        """Fallback tokenization: join turns with EOS and left-truncate to model_max_length."""
        _UpperCamelCase = self.tokenizer.eos_token_id
        _UpperCamelCase = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_))
        if len(lowercase_) > self.tokenizer.model_max_length:
            _UpperCamelCase = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 82 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
# Make kernels deterministic so the image-slice comparisons below are reproducible.
enable_full_determinism()
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
    """Fast CPU tests for `StableDiffusionSAGPipeline` built from tiny dummy components.

    NOTE(review): the base-class names and the repeated `UpperCamelCase` class
    attributes were mechanically renamed; upstream these are the imported
    pipeline test mixins and attributes such as `pipeline_class` / `params` /
    `batch_params` — confirm against the original source.
    """
    UpperCamelCase = StableDiffusionSAGPipeline
    UpperCamelCase = TEXT_TO_IMAGE_PARAMS
    UpperCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS
    UpperCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
    UpperCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
    UpperCamelCase = False
    def __lowerCAmelCase ( self :str ) -> Union[str, Any]:
        """Build tiny seeded UNet/scheduler/VAE/CLIP components so the pipeline runs fast."""
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE : Union[str, Any] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        SCREAMING_SNAKE_CASE : List[str] = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE : List[Any] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE : Optional[Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        SCREAMING_SNAKE_CASE : Optional[int] = CLIPTextModel(lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        SCREAMING_SNAKE_CASE : Any = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def __lowerCAmelCase ( self :str , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[int]=0 ) -> Any:
        """Return deterministic call kwargs for the pipeline (seeded per device)."""
        if str(lowerCamelCase_ ).startswith('''mps''' ):
            # mps does not support device-bound generators.
            SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(lowerCamelCase_ )
        else:
            SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : str = {
            '''prompt''': '''.''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 1.0,
            '''sag_scale''': 1.0,
            '''output_type''': '''numpy''',
        }
        return inputs
    def __lowerCAmelCase ( self :int ) -> List[Any]:
        """Relax the batched-vs-single tolerance for this pipeline."""
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowercase__( unittest.TestCase ):
    """Slow GPU integration tests: run real SAG checkpoints and compare output
    image slices against stored reference values."""
    def __lowerCAmelCase ( self :List[Any] ) -> List[Any]:
        """Release GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __lowerCAmelCase ( self :Any ) -> Optional[int]:
        """SD 1.4 + SAG: check a 3x3 corner slice of the 512x512 output."""
        SCREAMING_SNAKE_CASE : List[str] = StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
        SCREAMING_SNAKE_CASE : int = sag_pipe.to(lowerCamelCase_ )
        sag_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Optional[Any] = '''.'''
        SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE : Optional[int] = sag_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
        SCREAMING_SNAKE_CASE : List[Any] = output.images
        SCREAMING_SNAKE_CASE : List[str] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        SCREAMING_SNAKE_CASE : int = np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
    def __lowerCAmelCase ( self :Any ) -> Any:
        """SD 2.1-base + SAG: check a 3x3 corner slice of the 512x512 output."""
        SCREAMING_SNAKE_CASE : Any = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        SCREAMING_SNAKE_CASE : Tuple = sag_pipe.to(lowerCamelCase_ )
        sag_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : List[str] = '''.'''
        SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE : Union[str, Any] = sag_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
        SCREAMING_SNAKE_CASE : Optional[int] = output.images
        SCREAMING_SNAKE_CASE : int = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        SCREAMING_SNAKE_CASE : Optional[Any] = np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
    def __lowerCAmelCase ( self :Any ) -> Optional[Any]:
        """SD 2.1-base + SAG with a non-square 768x512 output: check the shape."""
        SCREAMING_SNAKE_CASE : Any = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        SCREAMING_SNAKE_CASE : Any = sag_pipe.to(lowerCamelCase_ )
        sag_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Optional[int] = '''.'''
        SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE : Any = sag_pipe(
            [prompt] , width=7_68 , height=5_12 , generator=lowerCamelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' , )
        SCREAMING_SNAKE_CASE : str = output.images
        assert image.shape == (1, 5_12, 7_68, 3)
| 698 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowercase__:
    """Helper that builds tiny `ConvBertConfig`s, random inputs, and per-head
    checks for the TFConvBert model test suite.

    NOTE(review): identifiers were mechanically renamed — every method is
    `__lowerCAmelCase` and every assignment target `SCREAMING_SNAKE_CASE`,
    while later lines read the original names (`input_ids`, `config`, ...).
    The docstrings describe the intended behavior; confirm upstream.
    """
    def __init__( self :Tuple , lowerCamelCase_ :Tuple , lowerCamelCase_ :Tuple=13 , lowerCamelCase_ :List[str]=7 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :List[str]=True , lowerCamelCase_ :Dict=True , lowerCamelCase_ :str=99 , lowerCamelCase_ :Optional[Any]=32 , lowerCamelCase_ :Tuple=2 , lowerCamelCase_ :int=4 , lowerCamelCase_ :Optional[Any]=37 , lowerCamelCase_ :Any="gelu" , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Optional[int]=5_12 , lowerCamelCase_ :str=16 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :List[str]=0.0_2 , lowerCamelCase_ :int=3 , lowerCamelCase_ :List[Any]=4 , lowerCamelCase_ :Optional[Any]=None , ) -> Any:
        """Store test hyperparameters.

        NOTE(review): the keyword arguments are accepted but ignored — every
        value below is hard-coded (visible in the body). This mirrors the
        upstream tester, which deliberately overrides the defaults.
        """
        SCREAMING_SNAKE_CASE : int = parent
        SCREAMING_SNAKE_CASE : str = 13
        SCREAMING_SNAKE_CASE : str = 7
        SCREAMING_SNAKE_CASE : List[Any] = True
        SCREAMING_SNAKE_CASE : List[str] = True
        SCREAMING_SNAKE_CASE : Union[str, Any] = True
        SCREAMING_SNAKE_CASE : str = True
        SCREAMING_SNAKE_CASE : Any = 99
        SCREAMING_SNAKE_CASE : Dict = 3_84
        SCREAMING_SNAKE_CASE : List[str] = 2
        SCREAMING_SNAKE_CASE : int = 4
        SCREAMING_SNAKE_CASE : Any = 37
        SCREAMING_SNAKE_CASE : List[str] = '''gelu'''
        SCREAMING_SNAKE_CASE : List[str] = 0.1
        SCREAMING_SNAKE_CASE : int = 0.1
        SCREAMING_SNAKE_CASE : Union[str, Any] = 5_12
        SCREAMING_SNAKE_CASE : int = 16
        SCREAMING_SNAKE_CASE : List[str] = 2
        SCREAMING_SNAKE_CASE : Tuple = 0.0_2
        SCREAMING_SNAKE_CASE : List[str] = 3
        SCREAMING_SNAKE_CASE : Union[str, Any] = 4
        SCREAMING_SNAKE_CASE : str = 1_28
        SCREAMING_SNAKE_CASE : List[str] = 2
        SCREAMING_SNAKE_CASE : Union[str, Any] = 9
        SCREAMING_SNAKE_CASE : Dict = 1
        SCREAMING_SNAKE_CASE : List[str] = None
    def __lowerCAmelCase ( self :Optional[Any] ) -> Any:
        """Build a tiny config plus random ids/masks/labels for all task heads."""
        SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        SCREAMING_SNAKE_CASE : int = None
        if self.use_input_mask:
            SCREAMING_SNAKE_CASE : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
        SCREAMING_SNAKE_CASE : List[Any] = None
        if self.use_token_type_ids:
            SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        SCREAMING_SNAKE_CASE : List[str] = None
        SCREAMING_SNAKE_CASE : str = None
        SCREAMING_SNAKE_CASE : str = None
        if self.use_labels:
            SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
        SCREAMING_SNAKE_CASE : List[str] = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=lowerCamelCase_ , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def __lowerCAmelCase ( self :str , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :List[str] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Tuple ) -> Dict:
        """Base model: check last_hidden_state shape for dict and list inputs."""
        SCREAMING_SNAKE_CASE : Optional[Any] = TFConvBertModel(config=lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        SCREAMING_SNAKE_CASE : Dict = [input_ids, input_mask]
        SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def __lowerCAmelCase ( self :str , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :str , lowerCamelCase_ :Dict ) -> str:
        """Masked-LM head: logits shaped (batch, seq, vocab)."""
        SCREAMING_SNAKE_CASE : Any = TFConvBertForMaskedLM(config=lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Tuple = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        SCREAMING_SNAKE_CASE : Tuple = model(lowerCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :str , lowerCamelCase_ :Any , lowerCamelCase_ :str , lowerCamelCase_ :Dict , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :List[Any] ) -> Optional[int]:
        """Sequence-classification head: logits shaped (batch, num_labels)."""
        SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
        SCREAMING_SNAKE_CASE : Dict = TFConvBertForSequenceClassification(config=lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Any = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def __lowerCAmelCase ( self :int , lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Dict , lowerCamelCase_ :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[Any] ) -> List[Any]:
        """Multiple-choice head: tile inputs per choice, logits shaped (batch, num_choices)."""
        SCREAMING_SNAKE_CASE : List[Any] = self.num_choices
        SCREAMING_SNAKE_CASE : Optional[Any] = TFConvBertForMultipleChoice(config=lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : List[Any] = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
        SCREAMING_SNAKE_CASE : Dict = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
        SCREAMING_SNAKE_CASE : List[Any] = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
        SCREAMING_SNAKE_CASE : Any = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Tuple , lowerCamelCase_ :List[str] ) -> Optional[int]:
        """Token-classification head: logits shaped (batch, seq, num_labels)."""
        SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
        SCREAMING_SNAKE_CASE : List[Any] = TFConvBertForTokenClassification(config=lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : int = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[Any] ) -> Optional[int]:
        """Question-answering head: start/end logits shaped (batch, seq)."""
        SCREAMING_SNAKE_CASE : int = TFConvBertForQuestionAnswering(config=lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Dict = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        SCREAMING_SNAKE_CASE : Dict = model(lowerCamelCase_ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def __lowerCAmelCase ( self :List[Any] ) -> Optional[Any]:
        """Repackage prepare_config_and_inputs() into the common (config, inputs_dict) form."""
        SCREAMING_SNAKE_CASE : Tuple = self.prepare_config_and_inputs()
        (
            (
                SCREAMING_SNAKE_CASE
            ), (
                SCREAMING_SNAKE_CASE
            ), (
                SCREAMING_SNAKE_CASE
            ), (
                SCREAMING_SNAKE_CASE
            ), (
                SCREAMING_SNAKE_CASE
            ), (
                SCREAMING_SNAKE_CASE
            ), (
                SCREAMING_SNAKE_CASE
            ),
        ) : Optional[Any] = config_and_inputs
        SCREAMING_SNAKE_CASE : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __lowerCAmelCase ( self :Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = TFConvBertModelTester(self )
SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self :Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase_ )
def __lowerCAmelCase ( self :List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Any ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : Tuple = True
if hasattr(lowerCamelCase_ , '''use_cache''' ):
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : str = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
SCREAMING_SNAKE_CASE : Optional[int] = getattr(self.model_tester , '''key_length''' , lowerCamelCase_ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : str = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = len(model(lowerCamelCase_ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase_ , saved_model=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = os.path.join(lowerCamelCase_ , '''saved_model''' , '''1''' )
SCREAMING_SNAKE_CASE : Tuple = tf.keras.models.load_model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE : Optional[int] = outputs['''encoder_hidden_states''']
SCREAMING_SNAKE_CASE : str = outputs['''encoder_attentions''']
else:
SCREAMING_SNAKE_CASE : List[str] = outputs['''hidden_states''']
SCREAMING_SNAKE_CASE : List[Any] = outputs['''attentions''']
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __lowerCAmelCase ( self :Any ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
self.assertIsNotNone(lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Tuple:
    '''simple docstring'''
    # NOTE(review): this block looks machine-mangled.  Many names below
    # (`SCREAMING_SNAKE_CASE`, `lowerCamelCase_`, `out_len`, `outputs`,
    # `inputs_dict`, `config`) are never defined in this scope, and the first
    # statement is an annotated tuple assignment (`a, b : Dict = ...`), which
    # is a SyntaxError in Python.  Confirm against the original
    # `test_attention_outputs`-style test before relying on it.
    SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
    SCREAMING_SNAKE_CASE : str = True
    # Sequence/key lengths fall back to the tester's generic seq_length.
    SCREAMING_SNAKE_CASE : List[str] = getattr(self.model_tester , '''decoder_seq_length''' , self.model_tester.seq_length )
    SCREAMING_SNAKE_CASE : List[str] = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
    SCREAMING_SNAKE_CASE : List[str] = getattr(self.model_tester , '''key_length''' , lowerCamelCase_ )
    SCREAMING_SNAKE_CASE : Any = getattr(self.model_tester , '''key_length''' , lowerCamelCase_ )

    def check_decoder_attentions_output(lowerCamelCase_ :Optional[Any] ):
        # Checks count and (heads/2, seq, key) shape of decoder attentions.
        SCREAMING_SNAKE_CASE : Any = len(lowerCamelCase_ )
        self.assertEqual(out_len % 2 , 0 )
        SCREAMING_SNAKE_CASE : int = outputs.decoder_attentions
        self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
        self.assertListEqual(
            list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )

    def check_encoder_attentions_output(lowerCamelCase_ :Optional[int] ):
        # Checks count and (heads/2, seq, key) shape of encoder attentions.
        SCREAMING_SNAKE_CASE : List[Any] = [
            t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
        ]
        self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
        self.assertListEqual(
            list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

    for model_class in self.all_model_classes:
        SCREAMING_SNAKE_CASE : Optional[int] = True
        SCREAMING_SNAKE_CASE : List[str] = False
        SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : List[Any] = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
        SCREAMING_SNAKE_CASE : Any = len(lowerCamelCase_ )
        self.assertEqual(config.output_hidden_states , lowerCamelCase_ )
        check_encoder_attentions_output(lowerCamelCase_ )

        if self.is_encoder_decoder:
            SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCamelCase_ )
            SCREAMING_SNAKE_CASE : Optional[Any] = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
            self.assertEqual(config.output_hidden_states , lowerCamelCase_ )
            check_decoder_attentions_output(lowerCamelCase_ )

        # Check that output attentions can also be changed via the config
        del inputs_dict["output_attentions"]
        SCREAMING_SNAKE_CASE : List[Any] = True
        SCREAMING_SNAKE_CASE : List[str] = model_class(lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : str = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
        self.assertEqual(config.output_hidden_states , lowerCamelCase_ )
        check_encoder_attentions_output(lowerCamelCase_ )

        # Check attention is always last and order is fine
        SCREAMING_SNAKE_CASE : Optional[int] = True
        SCREAMING_SNAKE_CASE : str = True
        SCREAMING_SNAKE_CASE : Optional[Any] = model_class(lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Dict = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
        self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowerCamelCase_ ) )
        self.assertEqual(model.config.output_hidden_states , lowerCamelCase_ )
        check_encoder_attentions_output(lowerCamelCase_ )
@require_tf
class lowercase__( unittest.TestCase ):
    """Slow integration test: compare TFConvBertModel outputs to reference values."""

    @slow
    def __lowerCAmelCase ( self :int ) -> List[str]:
        """Run the pretrained checkpoint on a fixed input and check shape and a 3x3 slice."""
        # Bug fix: the original assertions compared against the undefined name
        # `lowerCamelCase_`; compare against the expected values computed below.
        model = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]

        expected_shape = [1, 6, 7_68]
        self.assertEqual(output.shape , expected_shape )

        expected_slice = tf.constant(
            [
                [
                    [-0.0_3_4_7_5_4_9_3, -0.4_6_8_6_0_3_4, -0.3_0_6_3_8_8_3_2],
                    [0.2_2_6_3_7_2_4_8, -0.2_6_9_8_8_6_4_6, -0.7_4_2_3_4_2_4],
                    [0.1_0_3_2_4_8_6_8, -0.4_5_0_1_3_5_0_8, -0.5_8_2_8_0_7_8_4],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
| 698 | 1 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a__ ( lowerCAmelCase_ , unittest.TestCase ):
    # NOTE(review): machine-mangled block.  The mixin base `lowerCAmelCase_`
    # is never defined at module level, several statements reference the
    # undefined name `lowerCamelCase_`, every class attribute shares the name
    # `lowerCamelCase__` (later assignments shadow earlier ones), and all
    # methods share the name `UpperCAmelCase`.  Confirm against the original
    # OpenAI-GPT tokenizer test suite.

    lowerCamelCase__: Optional[Any] = OpenAIGPTTokenizer
    lowerCamelCase__: Optional[Any] = OpenAIGPTTokenizerFast
    lowerCamelCase__: Dict = True
    lowerCamelCase__: List[str] = False

    def UpperCAmelCase( self : Tuple ):
        # Write a tiny BPE vocab + merges file pair into the temp directory.
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        a_ : int = [
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """w</w>""",
            """r</w>""",
            """t</w>""",
            """lo""",
            """low""",
            """er</w>""",
            """low</w>""",
            """lowest</w>""",
            """newer</w>""",
            """wider</w>""",
            """<unk>""",
        ]
        a_ : List[str] = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
        a_ : Tuple = ["""#version: 0.2""", """l o""", """lo w""", """e r</w>""", """"""]
        a_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        a_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" ) as fp:
            fp.write(json.dumps(lowerCamelCase_ ) )
        with open(self.merges_file , """w""" ) as fp:
            fp.write("""\n""".join(lowerCamelCase_ ) )

    def UpperCAmelCase( self : Dict , lowerCamelCase_ : Union[str, Any] ):
        # Input/output text pair consumed by the common tokenizer tests.
        return "lower newer", "lower newer"

    def UpperCAmelCase( self : Dict ):
        # Tokenize a word with the slow tokenizer and check tokens and ids.
        a_ : Tuple = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )

        a_ : str = """lower"""
        a_ : int = ["""low""", """er</w>"""]
        a_ : Union[str, Any] = tokenizer.tokenize(lowerCamelCase_ )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )

        a_ : Tuple = tokens + ["""<unk>"""]
        a_ : List[Any] = [1_4, 1_5, 2_0]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , lowerCamelCase_ )

    def UpperCAmelCase( self : Dict , lowerCamelCase_ : List[str]=1_5 ):
        # Padding to max_length should raise for this tokenizer (no pad token).
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                a_ : Dict = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )

                # Simple input
                a_ : Any = """This is a simple input"""
                a_ : Any = ["""This is a simple input 1""", """This is a simple input 2"""]
                a_ : Optional[Any] = ("""This is a simple input""", """This is a pair""")
                a_ : Union[str, Any] = [
                    ("""This is a simple input 1""", """This is a simple input 2"""),
                    ("""This is a simple pair 1""", """This is a simple pair 2"""),
                ]

                # Simple input tests
                self.assertRaises(lowerCamelCase_ , tokenizer_r.encode , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="""max_length""" )

                # Simple input
                self.assertRaises(lowerCamelCase_ , tokenizer_r.encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="""max_length""" )

                # Simple input
                self.assertRaises(
                    lowerCamelCase_ , tokenizer_r.batch_encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="""max_length""" , )

                # Pair input
                self.assertRaises(lowerCamelCase_ , tokenizer_r.encode , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="""max_length""" )

                # Pair input
                self.assertRaises(lowerCamelCase_ , tokenizer_r.encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="""max_length""" )

                # Pair input
                self.assertRaises(
                    lowerCamelCase_ , tokenizer_r.batch_encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="""max_length""" , )

    def UpperCAmelCase( self : Tuple ):
        # Intentionally empty — presumably disables a common-suite test that
        # does not apply to this tokenizer; confirm against the original.
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class a__ ( lowerCAmelCase_ ):
    # NOTE(review): placeholder subclass — presumably re-runs the tokenizer
    # test suite with ftfy/spacy available.  The base name `lowerCAmelCase_`
    # is not defined at module level (machine-mangled); confirm.
    pass
| 721 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class a__ ( lowerCAmelCase_ , unittest.TestCase ):
    # NOTE(review): machine-mangled block.  The mixin base `lowerCAmelCase_`
    # is undefined at module level, the name `lowerCamelCase_` is used as an
    # argument value in methods that never define it, every method shares the
    # name `UpperCAmelCase` (later defs shadow earlier ones), and the
    # annotated tuple assignment `a_ , a_ : ... = embeds` near the end is a
    # SyntaxError.  Confirm against the original ONNX Stable Diffusion fast
    # pipeline tests.

    # Tiny test checkpoint used by all fast tests below.
    lowerCamelCase__: int = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""

    def UpperCAmelCase( self : int , lowerCamelCase_ : Union[str, Any]=0 ):
        # Deterministic pipeline kwargs; `lowerCamelCase_` seeds the RNG.
        a_ : Any = np.random.RandomState(lowerCamelCase_ )
        a_ : Optional[Any] = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs

    def UpperCAmelCase( self : List[Any] ):
        # Default scheduler: check image shape and a reference slice.
        a_ : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.set_progress_bar_config(disable=lowerCamelCase_ )

        a_ : Optional[int] = self.get_dummy_inputs()

        a_ : Optional[Any] = pipe(**lowerCamelCase_ ).images
        a_ : Tuple = image[0, -3:, -3:, -1]

        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        a_ : str = np.array([0.6_5_0_7_2, 0.5_8_4_9_2, 0.4_8_2_1_9, 0.5_5_5_2_1, 0.5_3_1_8_0, 0.5_5_9_3_9, 0.5_0_6_9_7, 0.3_9_8_0_0, 0.4_6_4_5_5] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def UpperCAmelCase( self : List[str] ):
        # PNDM scheduler variant.
        a_ : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        a_ : Any = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCamelCase_ )
        pipe.set_progress_bar_config(disable=lowerCamelCase_ )

        a_ : Dict = self.get_dummy_inputs()

        a_ : Optional[int] = pipe(**lowerCamelCase_ ).images
        a_ : Optional[Any] = image[0, -3:, -3:, -1]

        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        a_ : Tuple = np.array([0.6_5_8_6_3, 0.5_9_4_2_5, 0.4_9_3_2_6, 0.5_6_3_1_3, 0.5_3_8_7_5, 0.5_6_6_2_7, 0.5_1_0_6_5, 0.3_9_7_7_7, 0.4_6_3_3_0] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def UpperCAmelCase( self : List[Any] ):
        # LMS discrete scheduler variant.
        a_ : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        a_ : Optional[int] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=lowerCamelCase_ )

        a_ : Dict = self.get_dummy_inputs()

        a_ : Union[str, Any] = pipe(**lowerCamelCase_ ).images
        a_ : int = image[0, -3:, -3:, -1]

        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        a_ : Optional[int] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def UpperCAmelCase( self : Any ):
        # Euler discrete scheduler variant.
        a_ : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        a_ : Optional[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=lowerCamelCase_ )

        a_ : Optional[Any] = self.get_dummy_inputs()

        a_ : Optional[Any] = pipe(**lowerCamelCase_ ).images
        a_ : Optional[Any] = image[0, -3:, -3:, -1]

        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        a_ : Optional[int] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def UpperCAmelCase( self : List[str] ):
        # Euler-ancestral scheduler variant.
        a_ : Any = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        a_ : str = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=lowerCamelCase_ )

        a_ : Dict = self.get_dummy_inputs()

        a_ : Union[str, Any] = pipe(**lowerCamelCase_ ).images
        a_ : Any = image[0, -3:, -3:, -1]

        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        a_ : Tuple = np.array([0.5_3_8_1_7, 0.6_0_8_1_2, 0.4_7_3_8_4, 0.4_9_5_3_0, 0.5_1_8_9_4, 0.4_9_8_1_4, 0.4_7_9_8_4, 0.3_8_9_5_8, 0.4_4_2_7_1] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def UpperCAmelCase( self : Optional[Any] ):
        # DPM-Solver multistep scheduler variant.
        a_ : Tuple = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        a_ : Tuple = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=lowerCamelCase_ )

        a_ : Tuple = self.get_dummy_inputs()

        a_ : Optional[int] = pipe(**lowerCamelCase_ ).images
        a_ : str = image[0, -3:, -3:, -1]

        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        a_ : List[Any] = np.array([0.5_3_8_9_5, 0.6_0_8_0_8, 0.4_7_9_3_3, 0.4_9_6_0_8, 0.5_1_8_8_6, 0.4_9_9_5_0, 0.4_8_0_5_3, 0.3_8_9_5_7, 0.4_4_2_0_0] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def UpperCAmelCase( self : Optional[Any] ):
        # Passing precomputed prompt embeddings must match prompt-string output.
        a_ : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.set_progress_bar_config(disable=lowerCamelCase_ )

        a_ : List[str] = self.get_dummy_inputs()
        a_ : Optional[int] = 3 * [inputs["""prompt"""]]

        # forward
        a_ : Optional[int] = pipe(**lowerCamelCase_ )
        a_ : Union[str, Any] = output.images[0, -3:, -3:, -1]

        a_ : Tuple = self.get_dummy_inputs()
        a_ : List[Any] = 3 * [inputs.pop("""prompt""" )]

        a_ : List[Any] = pipe.tokenizer(
            lowerCamelCase_ , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=lowerCamelCase_ , return_tensors="""np""" , )
        a_ : str = text_inputs["""input_ids"""]

        a_ : Dict = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]

        a_ : List[Any] = prompt_embeds

        # forward
        a_ : Tuple = pipe(**lowerCamelCase_ )
        a_ : Union[str, Any] = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4

    def UpperCAmelCase( self : List[str] ):
        # Negative-prompt string vs. precomputed embeddings must match.
        a_ : Any = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.set_progress_bar_config(disable=lowerCamelCase_ )

        a_ : int = self.get_dummy_inputs()
        a_ : str = 3 * ["""this is a negative prompt"""]
        a_ : int = negative_prompt
        a_ : Dict = 3 * [inputs["""prompt"""]]

        # forward
        a_ : Tuple = pipe(**lowerCamelCase_ )
        a_ : Union[str, Any] = output.images[0, -3:, -3:, -1]

        a_ : Tuple = self.get_dummy_inputs()
        a_ : Optional[int] = 3 * [inputs.pop("""prompt""" )]

        a_ : str = []
        for p in [prompt, negative_prompt]:
            a_ : Optional[int] = pipe.tokenizer(
                lowerCamelCase_ , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=lowerCamelCase_ , return_tensors="""np""" , )
            a_ : int = text_inputs["""input_ids"""]

            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )

        a_ , a_ : Union[str, Any] = embeds

        # forward
        a_ : Optional[Any] = pipe(**lowerCamelCase_ )
        a_ : int = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class a__ ( unittest.TestCase ):
    # NOTE(review): machine-mangled like the fast-test class above — the name
    # `lowerCamelCase_` is referenced as an argument value but never defined
    # in these methods, and all test methods share the name `UpperCAmelCase`
    # (later definitions shadow earlier ones).  Confirm against the original
    # nightly ONNX Stable Diffusion tests.

    @property
    def UpperCAmelCase( self : Tuple ):
        # ONNX Runtime CUDA provider with a capped memory arena.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def UpperCAmelCase( self : List[Any] ):
        # Session options for ONNX Runtime.
        a_ : List[str] = ort.SessionOptions()
        a_ : List[str] = False
        return options

    def UpperCAmelCase( self : Optional[Any] ):
        # using the PNDM scheduler by default
        a_ : int = OnnxStableDiffusionPipeline.from_pretrained(
            """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )

        a_ : Tuple = """A painting of a squirrel eating a burger"""
        np.random.seed(0 )
        a_ : int = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=1_0 , output_type="""np""" )
        a_ : Optional[int] = output.images

        a_ : Tuple = image[0, -3:, -3:, -1]

        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        a_ : Optional[int] = np.array([0.0_4_5_2, 0.0_3_9_0, 0.0_0_8_7, 0.0_3_5_0, 0.0_6_1_7, 0.0_3_6_4, 0.0_5_4_4, 0.0_5_2_3, 0.0_7_2_0] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def UpperCAmelCase( self : str ):
        # DDIM scheduler variant.
        a_ : str = DDIMScheduler.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
        a_ : Any = OnnxStableDiffusionPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )

        a_ : Union[str, Any] = """open neural network exchange"""
        a_ : str = np.random.RandomState(0 )
        a_ : str = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=1_0 , generator=lowerCamelCase_ , output_type="""np""" )
        a_ : str = output.images
        a_ : Optional[Any] = image[0, -3:, -3:, -1]

        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        a_ : List[str] = np.array([0.2_8_6_7, 0.1_9_7_4, 0.1_4_8_1, 0.7_2_9_4, 0.7_2_5_1, 0.6_6_6_7, 0.4_1_9_4, 0.5_6_4_2, 0.6_4_8_6] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def UpperCAmelCase( self : Optional[int] ):
        # LMS discrete scheduler variant.
        a_ : Optional[int] = LMSDiscreteScheduler.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
        a_ : List[str] = OnnxStableDiffusionPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )

        a_ : Optional[int] = """open neural network exchange"""
        a_ : Tuple = np.random.RandomState(0 )
        a_ : Tuple = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=1_0 , generator=lowerCamelCase_ , output_type="""np""" )
        a_ : Tuple = output.images
        a_ : str = image[0, -3:, -3:, -1]

        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        a_ : str = np.array([0.2_3_0_6, 0.1_9_5_9, 0.1_5_9_3, 0.6_5_4_9, 0.6_3_9_4, 0.5_4_0_8, 0.5_0_6_5, 0.6_0_1_0, 0.6_1_6_1] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def UpperCAmelCase( self : Optional[Any] ):
        # Verify the per-step callback is invoked with the expected latents.
        a_ : List[str] = 0

        def test_callback_fn(lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : np.ndarray ) -> None:
            a_ : List[Any] = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 6_4, 6_4)
                a_ : Union[str, Any] = latents[0, -3:, -3:, -1]
                a_ : Optional[Any] = np.array(
                    [-0.6_7_7_2, -0.3_8_3_5, -1.2_4_5_6, 0.1_9_0_5, -1.0_9_7_4, 0.6_9_6_7, -1.9_3_5_3, 0.0_1_7_8, 1.0_1_6_7] )

                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
            elif step == 5:
                assert latents.shape == (1, 4, 6_4, 6_4)
                a_ : List[str] = latents[0, -3:, -3:, -1]
                a_ : int = np.array(
                    [-0.3_3_5_1, 0.2_2_4_1, -0.1_8_3_7, -0.2_3_2_5, -0.6_5_7_7, 0.3_3_9_3, -0.0_2_4_1, 0.5_8_9_9, 1.3_8_7_5] )

                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3

        a_ : Optional[int] = False

        a_ : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=lowerCamelCase_ )

        a_ : List[str] = """Andromeda galaxy in a bottle"""

        a_ : Union[str, Any] = np.random.RandomState(0 )
        pipe(
            prompt=lowerCamelCase_ , num_inference_steps=5 , guidance_scale=7.5 , generator=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=1 , )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6

    def UpperCAmelCase( self : Dict ):
        # Pipeline with safety_checker disabled still runs and round-trips
        # through save_pretrained / from_pretrained.
        a_ : Tuple = OnnxStableDiffusionPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
        assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
        assert pipe.safety_checker is None

        a_ : Union[str, Any] = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(lowerCamelCase_ )
            a_ : Tuple = OnnxStableDiffusionPipeline.from_pretrained(lowerCamelCase_ )

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        a_ : Any = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
        assert image is not None
| 478 | 0 |
from __future__ import annotations
def _A (UpperCamelCase : str ) ->list[int]:
'''simple docstring'''
return [ord(UpperCamelCase ) - 96 for elem in plain]
def _A (UpperCamelCase : list[int] ) ->str:
'''simple docstring'''
return "".join(chr(elem + 96 ) for elem in encoded )
def _A () ->None:
    '''simple docstring'''
    # NOTE(review): `encode`, `decode` and `UpperCamelCase` are never defined
    # in this module (the helpers above are both named `_A`, shadowing each
    # other), so this entry point raises NameError as written.  The
    # identifiers look machine-mangled; confirm against the original script.
    lowerCamelCase__ : Any = encode(input("""-> """ ).strip().lower() )
    print("""Encoded: """ , UpperCamelCase )
    print("""Decoded:""" , decode(UpperCamelCase ) )
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this module — the functions above
    # are all named `_A` (machine-mangled) — so this call raises NameError.
    main()
| 157 |
from statistics import mean, stdev
def _A (UpperCamelCase : list , UpperCamelCase : int = 3 ) ->list:
'''simple docstring'''
lowerCamelCase__ : Dict = min(UpperCamelCase )
lowerCamelCase__ : List[str] = max(UpperCamelCase )
# normalize data
return [round((x - x_min) / (x_max - x_min) , UpperCamelCase ) for x in data]
def _A (UpperCamelCase : list , UpperCamelCase : int = 3 ) ->list:
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = mean(UpperCamelCase )
lowerCamelCase__ : Tuple = stdev(UpperCamelCase )
# standardize data
return [round((x - mu) / (sigma) , UpperCamelCase ) for x in data]
| 157 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
lowerCAmelCase : str =[
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def _UpperCAmelCase ( input_text ,convert_value=None ,default=None ,error_message=None ):
    '''Prompt the user until the answer can be converted successfully.

    Bug fix: the original declared ``SCREAMING_SNAKE_CASE__`` four times as a
    parameter name (a SyntaxError) and referenced undefined locals; the
    parameter names are reconstructed from how the body uses them — the
    prompt text, an optional converter, an optional default returned on empty
    input, and an optional message printed when conversion fails.
    '''
    ask_again = True
    while ask_again:
        result = input(input_text )
        try:
            # Empty answer falls back to the default, when one was given.
            if default is not None and len(result ) == 0:
                return default
            return convert_value(result ) if convert_value is not None else result
        except Exception:
            # Conversion failed: optionally explain, then re-prompt.
            if error_message is not None:
                print(error_message )
def _UpperCAmelCase ( input_text ,options=[] ,convert_value=None ,default_choice=0 ):
    '''Show a BulletMenu of *options* and return the (optionally converted) pick.

    Bug fix: the original declared ``SCREAMING_SNAKE_CASE__`` repeatedly as a
    parameter name (a SyntaxError); the parameters are reconstructed from the
    body.  The mutable default ``[]`` is only read, never mutated.
    '''
    menu = BulletMenu(input_text ,options )
    result = menu.run(default_choice=default_choice )
    return convert_value(result ) if convert_value is not None else result
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
    '''Map a menu index (0 or 1) to a ComputeEnvironment member.

    Bug fix: the original assigned the parsed int to ``lowerCAmelCase`` but
    indexed with the undefined name ``value``.
    '''
    value = int(SCREAMING_SNAKE_CASE__ )
    return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
    '''Map a menu index to a DistributedType member.

    Bug fix: the original assigned the parsed int to ``lowerCAmelCase`` but
    indexed with the undefined name ``value``.
    '''
    value = int(SCREAMING_SNAKE_CASE__ )
    return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
    '''Map a menu index into the module-level DYNAMO_BACKENDS list.

    Bug fix: the original assigned the parsed int to ``lowerCAmelCase`` but
    indexed with the undefined name ``value``.
    '''
    value = int(SCREAMING_SNAKE_CASE__ )
    return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
    '''Map a menu index to a PrecisionType member.

    Bug fix: the original assigned the parsed int to ``lowerCAmelCase`` but
    indexed with the undefined name ``value``.
    '''
    value = int(SCREAMING_SNAKE_CASE__ )
    return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
    '''Map a menu index to a SageMakerDistributedType member.

    Bug fix: the original assigned the parsed int to ``lowerCAmelCase`` but
    indexed with the undefined name ``value``.
    '''
    value = int(SCREAMING_SNAKE_CASE__ )
    return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class _a ( argparse.RawDescriptionHelpFormatter ):
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[str]:
lowerCAmelCase : str = super()._format_usage(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowerCAmelCase : int = usage.replace("""<command> [<args>] """ , """""" )
return usage
| 693 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : Optional[int] ={
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class _a ( snake_case_ ):
    """Configuration class for Transformer-XL (`transfo-xl`) models.

    NOTE(review): the original obfuscated block reused the name
    `_UpperCamelCase` for every class attribute (each assignment shadowing
    the previous) and duplicated `lowercase_` for every __init__ parameter
    (a SyntaxError).  The canonical attribute and parameter names are
    restored below from the assignments in the body and the `PretrainedConfig`
    convention — confirm against the upstream Transformer-XL config.
    """

    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        # Share all adaptive-softmax projections except the first cluster.
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation.
        # Bug fix: the getter was named `_snake_case`, so the setter's
        # `@max_position_embeddings.setter` referenced a nonexistent name.
        logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation.
        raise NotImplementedError(
            f"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
| 693 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''facebook/data2vec-vision-base-ft''': (
'''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'''
),
}
class UpperCAmelCase_ ( UpperCamelCase ):
    '''Configuration class for Data2Vec vision models (`data2vec-vision`).

    NOTE(review): the original obfuscated block declared every __init__
    parameter as `__A` (a SyntaxError) while the body read the intended
    parameter names (`hidden_size`, `num_hidden_layers`, ...), and assigned
    each value to the same local name instead of an instance attribute.  The
    signature is restored from the names used in the body and the defaults in
    the original parameter list — confirm against the upstream config.
    '''

    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class UpperCAmelCase_ ( UpperCamelCase ):
    '''ONNX export configuration for Data2Vec vision models.

    NOTE(review): the original obfuscated block named both properties
    `_snake_case` (the second shadowing the first) and the class attribute
    `__A`; the names are restored per the `OnnxConfig` API — confirm
    against the upstream export config.
    '''

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        '''Dynamic axes of the single `pixel_values` input.'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        '''Absolute tolerance used when validating the exported model.'''
        return 1e-4
| 340 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    """State carried by the Flax Karras-Ve scheduler.

    NOTE(review): the original obfuscated block declared every field as
    `__A` (each shadowing the previous) and was itself named
    `UpperCAmelCase_`; the class, field and classmethod names are restored
    from the scheduler's call sites below (`KarrasVeSchedulerState.create()`
    and `state.replace(num_inference_steps=..., schedule=..., timesteps=...)`).
    """

    # Filled in by the scheduler's set_timesteps.
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        """Return a fresh, empty state."""
        return cls()
@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    """Output of the Flax Karras-Ve scheduler step methods.

    NOTE(review): the original obfuscated block declared every field as
    `__A` and was named `UpperCAmelCase_` with an undefined base
    `UpperCamelCase`; names are restored from the call site below
    (`FlaxKarrasVeOutput(prev_sample=..., derivative=..., state=...)`) and
    the `BaseOutput` import at the top of the file.
    """

    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState
class UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ):
    '''simple docstring'''
    # NOTE(review): machine-mangled block — the base name `UpperCamelCase` is
    # undefined (and listed twice), `__init__` and the other methods declare
    # the duplicated parameter name `__A` (a SyntaxError in Python), and the
    # bodies read names (`num_inference_steps`, `sigma`, `state`, `sample`,
    # `sigma_hat`, ...) that the mangled signatures never bind.  Confirm
    # against diffusers' FlaxKarrasVeScheduler.

    @property
    def _snake_case ( self ):
        """simple docstring"""
        return True

    @register_to_config
    def __init__( self , __A = 0.02 , __A = 100 , __A = 1.007 , __A = 80 , __A = 0.05 , __A = 50 , ):
        """simple docstring"""
        # Configuration is captured by @register_to_config; nothing else to do.
        pass

    def _snake_case ( self ):
        """simple docstring"""
        # Fresh, empty scheduler state.
        return KarrasVeSchedulerState.create()

    def _snake_case ( self , __A , __A , __A = () ):
        """simple docstring"""
        # Reversed timestep indices plus the geometric sigma schedule.
        lowerCamelCase : List[str] = jnp.arange(0 , __A )[::-1].copy()
        lowerCamelCase : Tuple = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]

        return state.replace(
            num_inference_steps=__A , schedule=jnp.array(__A , dtype=jnp.floataa ) , timesteps=__A , )

    def _snake_case ( self , __A , __A , __A , __A , ):
        """simple docstring"""
        # Stochastic churn: only applied while sigma is inside [s_min, s_max].
        if self.config.s_min <= sigma <= self.config.s_max:
            lowerCamelCase : List[str] = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
        else:
            lowerCamelCase : int = 0

        # sample eps ~ N(0, S_noise^2 * I)
        lowerCamelCase : Any = random.split(__A , num=1 )
        lowerCamelCase : List[Any] = self.config.s_noise * random.normal(key=__A , shape=sample.shape )
        lowerCamelCase : Dict = sigma + gamma * sigma
        lowerCamelCase : Any = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def _snake_case ( self , __A , __A , __A , __A , __A , __A = True , ):
        """simple docstring"""
        # First-order (Euler) step from sigma_hat to sigma_prev.
        lowerCamelCase : Dict = sample_hat + sigma_hat * model_output
        lowerCamelCase : str = (sample_hat - pred_original_sample) / sigma_hat
        lowerCamelCase : Optional[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=__A , derivative=__A , state=__A )

    def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A = True , ):
        """simple docstring"""
        # Second-order correction averaging the two derivative estimates.
        lowerCamelCase : List[Any] = sample_prev + sigma_prev * model_output
        lowerCamelCase : List[str] = (sample_prev - pred_original_sample) / sigma_prev
        lowerCamelCase : List[str] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=__A , derivative=__A , state=__A )

    def _snake_case ( self , __A , __A , __A , __A ):
        """simple docstring"""
        # Adding noise to samples is not supported by this scheduler.
        raise NotImplementedError()
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Linear/polynomial learning-rate warmup wrapped around another schedule.

    Renamed from the scrambled `UpperCAmelCase_`: this class is referenced as
    `WarmUp` by `create_optimizer` and by `AdamWeightDecay.from_config` below.
    """

    def __init__(self, initial_learning_rate: float, decay_schedule_fn: Callable, warmup_steps: int, power: float = 1.0, name: str = None):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup: while global_step < warmup_steps,
            # the learning rate is `(global_step / warmup_steps) ** power * init_lr`.
            global_step_float = tf.cast(step, tf.float32)  # was garbled `floataa`
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        """Serialization support for Keras schedule saving/loading."""
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def a_(init_lr: float, num_train_steps: int, num_warmup_steps: int, min_lr_ratio: float = 0.0, adam_beta1: float = 0.9, adam_beta2: float = 0.999, adam_epsilon: float = 1e-8, adam_clipnorm=None, adam_global_clipnorm=None, weight_decay_rate: float = 0.0, power: float = 1.0, include_in_weight_decay=None):
    """Create an optimizer with a polynomial-decay + warmup learning-rate schedule.

    The scrambled original named every parameter `lowerCamelCase` and repeated
    the keyword `beta_a=` in both optimizer calls — both SyntaxErrors. Parameter
    names are restored from the body's own references; the beta keywords are the
    Keras Adam names `beta_1`/`beta_2`.

    Returns:
        (optimizer, lr_schedule): the optimizer and the schedule, returned
        separately so the LR can be tracked independently of the optimizer.
    """
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        # Wrap the decay schedule in a linear warmup phase.
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay applied before each variable update.

    Fixes in this reconstruction: the base class was the undefined name
    `__lowercase` (the file imports `Adam` at the top exactly for this);
    `__init__` repeated one parameter name (SyntaxError); and every method
    shared a single scrambled name, so only the last definition survived.
    Method names are restored so the Keras optimizer hooks are actually
    overridden. The class is referenced as `AdamWeightDecay` by
    `create_optimizer` above.
    """

    def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=False, weight_decay_rate=0.0, include_in_weight_decay=None, exclude_from_weight_decay=None, name="AdamWeightDecay", **kwargs):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        # Regex patterns selecting which variables receive weight decay.
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Recreate the optimizer, registering WarmUp so its schedule deserializes."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        # The scrambled original computed this constant and discarded it.
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        """Apply decoupled weight decay to `var` when it is not excluded."""
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieve the learning rate, caching per-(device, dtype) coefficients."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        # Weight decay runs before the Adam update.
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether weight decay applies to a variable, by include/exclude regexes."""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class UpperCAmelCase_:
    """Accumulates gradients across steps for gradient-accumulation training.

    Fixes: the base class was the undefined name `__lowercase` (dropped — the
    class needs no base); both properties and the reset method shared one
    scrambled name; `__call__` iterated a name its parameter did not bind; and
    `reset` zeroed against the wrong name. `tf.intaa` was garbled `tf.int64`.
    """

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps (creates the counter lazily)."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """Current accumulated gradients; requires at least one __call__ first."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Add `gradients` (one per variable; None entries allowed) to the running sums."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        """Zero all accumulated gradients and the step counter."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
| 709 |
'''simple docstring'''
def a_(base: int, exponent: int, modulo_value: int) -> int:
    """Modular exponentiation by repeated squaring: base**exponent % modulo_value.

    Fixes: the original repeated one parameter name (SyntaxError) and recursed
    through an undefined name `_modexpt`; the recursion now targets this
    function itself. Note `exponent == 1` returns `base` unreduced, preserving
    the original's behavior.
    """
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        # Square the half-exponent result.
        x = a_(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        # Odd exponent: peel off one factor of base.
        return (base * a_(base, exponent - 1, modulo_value)) % modulo_value
def a_(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Last `digits` digits of the tetration base↑↑height (Project Euler 188).

    Fixes: the original repeated one parameter name (SyntaxError) and called an
    undefined helper `_modexpt`; the helper is now nested so the function is
    self-contained.
    """

    def _modexpt(b: int, exponent: int, modulo_value: int) -> int:
        # Recursive square-and-multiply; exponent == 1 returns b unreduced,
        # matching the sibling implementation above.
        if exponent == 1:
            return b
        if exponent % 2 == 0:
            x = _modexpt(b, exponent // 2, modulo_value) % modulo_value
            return (x * x) % modulo_value
        return (b * _modexpt(b, exponent - 1, modulo_value)) % modulo_value

    result = base
    # Build the power tower from the top down, modulo 10**digits.
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
    # Print the Project Euler 188 result. The original called an undefined
    # name `solution()`; the solver above is named `a_`.
    print(f"{a_() = }")
| 513 | 0 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__(TokenizerTesterMixin, unittest.TestCase):
    """Tests for the slow and fast CLIP tokenizers.

    NOTE(review): reconstructed from a scrambled original whose base class was
    the undefined `_UpperCamelCase` (the mixin is imported above) and whose
    local-variable names were destroyed while later lines still referenced the
    real names. String literals and expected ids are copied verbatim.
    """

    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        """Write a tiny BPE vocab/merges pair to the test tmpdir."""
        super().setUp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        """Slow tokenizer built from the tmpdir fixture."""
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """Fast tokenizer built from the tmpdir fixture."""
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Tokenization and id conversion against the tiny fixture vocab."""
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @require_ftfy
    def test_check_encoding_slow_fast(self):
        """Slow (ftfy) and fast tokenizers must agree on tricky unicode input."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        """Offset mappings must account for a leading space in the input."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

    def test_wrapped_tokenizer_raises_for_old_format(self):
        """Loading an old-format fast tokenizer must raise an informative error."""
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")
        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        # Only meaningful when ftfy is installed, hence the override.
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lowercases letters, so the common-case test does not apply.
        pass
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class __lowerCAmelCase(BaseOutput):
    """Output of the semantic Stable Diffusion pipeline.

    The base class was the undefined name `_UpperCamelCase`; `BaseOutput` is
    imported directly above and is the conventional base for pipeline outputs.
    NOTE(review): field names reconstructed from the imports (PIL/np/Optional
    are used nowhere else here) — confirm against upstream.
    """

    # Generated images, as PIL images or a numpy array.
    images: Union[List[PIL.Image.Image], np.ndarray]
    # Per-image NSFW flags, when a safety checker ran.
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 266 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
# Map from pretrained ERNIE-M checkpoint names to their config URLs on the Hub.
lowercase = {
    '''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
    '''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class __lowerCamelCase(PretrainedConfig):
    """Configuration class for the ERNIE-M model.

    Fixes: the base class was the undefined name `lowercase__`
    (`PretrainedConfig` is imported above), and every `__init__` parameter was
    named `a__` — a SyntaxError. Parameter names are restored from the
    attribute assignments in the body; defaults are unchanged.
    """

    model_type = "ernie_m"
    # Map legacy attribute names onto the canonical ones.
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(self, vocab_size=250002, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, initializer_range=0.02, pad_token_id=1, layer_norm_eps=1e-05, classifier_dropout=None, is_decoder=False, act_dropout=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
| 715 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCamelCase(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast tests for `IFInpaintingSuperResolutionPipeline`.

    Fixes: the base mixins were the undefined name `__SCREAMING_SNAKE_CASE`
    (both mixins are imported above) and every method shared the name `a_`, so
    unittest could not discover the tests and each definition shadowed the
    previous one. Local names in `get_dummy_inputs` are restored from the
    returned dict, which still references them.
    """

    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic tiny inputs for one pipeline call."""
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_floataa(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
| 564 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
# Show INFO-level progress while converting checkpoints.
logging.set_verbosity_info()
# Module-level logger for this conversion script.
__lowerCamelCase = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """Build (old_key, new_key) pairs mapping DiT checkpoint names to BEiT names.

    Fixes: the function and all parameters shared the scrambled name
    `UpperCAmelCase__` (a SyntaxError); the body already uses the real names
    (`config`, `has_lm_head`, `is_semantic`), and the function is called as
    `create_rename_keys` in the conversion entry point below.

    Args:
        config: model config providing `num_hidden_layers`.
        has_lm_head: True for masked-image-modeling checkpoints (adds the
            mask token and final layernorm instead of a classifier head).
        is_semantic: True when keys are prefixed with "backbone.".

    Returns:
        list[tuple[str, str]] of renames.
    """
    prefix = "backbone." if is_semantic else ""
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))
    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ]
    )
    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )
    return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """Split fused qkv weights into separate query/key/value entries, in place.

    Fixes: function and parameters shared one scrambled name (SyntaxError),
    and every assignment into `state_dict` had its target replaced by a
    throwaway local, so the converted weights were silently discarded.
    Targets follow the standard BEiT naming used by the renames above.

    Args:
        state_dict: checkpoint dict, mutated in place.
        config: provides `num_hidden_layers` and `hidden_size`.
        has_lm_head: unused here; kept for call-site compatibility.
        is_semantic: True when source keys carry a "backbone." prefix.
    """
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    """Move `dct[old]` to `dct[new]`, in place.

    Fixes: the original dropped the popped value into a throwaway local
    instead of storing it under the new key, and its three parameters shared
    one name (SyntaxError). The function is called as `rename_key` by the
    conversion entry point below.
    """
    val = dct.pop(old)
    dct[new] = val
def UpperCAmelCase__():
    """Download and return the standard COCO cats test image as a PIL image."""
    image_url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    response = requests.get(image_url, stream=True)
    return Image.open(response.raw)
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Convert a DiT checkpoint to the HuggingFace BEiT format and verify it.

    Fixes: the function and its parameters shared one scrambled name
    (SyntaxError) and every local assignment target was destroyed while later
    lines still referenced the real names (`has_lm_head`, `config`, `idalabel`,
    `checkpoint_url`, ...). Names restored from those references.

    Args:
        checkpoint_url: URL of the original `.pth` checkpoint.
        pytorch_dump_folder_path: output directory for the converted model.
        push_to_hub: also push model and processor to the Hub when True.
    """
    # RVL-CDIP checkpoints are classification fine-tunes; the rest are MIM.
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()
    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    # CLI entry point for the DiT -> BEiT conversion. The original assigned
    # both the parser and the parsed args to a throwaway name, so the
    # `parser.add_argument` / `args.*` references below were NameErrors.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    args = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 288 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class A__(PretrainedConfig):
    """Configuration class for BertGeneration models.

    Fixes: the base class was the undefined name `_snake_case`
    (`PretrainedConfig` is imported directly above) and every `__init__`
    parameter was named `UpperCamelCase__` — a SyntaxError. Parameter names
    restored from the attribute assignments; defaults unchanged.
    """

    model_type = "bert-generation"

    def __init__(self, vocab_size=50358, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=2, eos_token_id=1, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 288 | 1 |
from ...processing_utils import ProcessorMixin
class SCREAMING_SNAKE_CASE(ProcessorMixin):
    """Processor wrapping a Whisper feature extractor and tokenizer.

    Fixes: the base class was the undefined name `lowerCAmelCase`
    (`ProcessorMixin` is imported directly above); the two class attributes
    must be named `feature_extractor_class`/`tokenizer_class` for
    ProcessorMixin to resolve them; four methods shared the name `_A` so only
    the last survived; and the `inputs["labels"]` assignment had its target
    replaced by a throwaway local.
    """

    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        """Forward decoder prompt ids from the tokenizer."""
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        """Process audio and/or text; returns features, encodings, or both."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            # Positional audio argument (legacy call style).
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            # Both provided: attach tokenized text as labels.
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
| 709 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 488 | 0 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
__lowerCamelCase : Optional[int] = re.compile(r"\s+")
def UpperCAmelCase_ ( lowerCAmelCase_ ):
    """Return the MD5 hex digest of the example's "content" with all whitespace removed.

    Fixes the original's nonexistent `hashlib.mda` and its reference to the
    undefined name `example`.
    """
    return {"hash": hashlib.md5(re.sub(r"\s+", "", lowerCAmelCase_["content"]).encode("utf-8")).hexdigest()}
def UpperCAmelCase_ ( lowerCAmelCase_ ):
    """Mean and maximum line length of the example's "content".

    Fixes the original, which measured `len` of the whole example per line and
    returned stats of an undefined name.
    """
    line_lengths = [len(line) for line in lowerCAmelCase_["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}
def UpperCAmelCase_ ( lowerCAmelCase_ ):
    """Fraction of alphanumeric characters in the example's "content".

    Fixes the original's reference to the undefined name `example`.
    """
    alpha_frac = np.mean([c.isalnum() for c in lowerCAmelCase_["content"]])
    return {"alpha_frac": alpha_frac}
def UpperCAmelCase_ ( example , uniques ):
    """Return True exactly once per unique hash; mutates `uniques`.

    Removes `example["hash"]` from `uniques` the first time it is seen, so
    subsequent duplicates return False. The original declared both parameters
    under one duplicate name (a SyntaxError); names restored from the body.
    """
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False
def UpperCAmelCase_ ( example , scan_width=5 ):
    """Flag files whose first `scan_width` lines carry an auto-generation marker.

    The original declared duplicate parameter names (a SyntaxError) and
    referenced the undefined name `example`; restored from the body's usage.
    """
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    return {"autogenerated": False}
def UpperCAmelCase_ ( example , scan_width=5 , coeff=0.05 ):
    """Heuristically flag config or test files.

    Two checks: (1) an explicit keyword in the first `scan_width` lines;
    (2) "config"/"test" occurrences exceeding `coeff` per line of content.
    The original declared three parameters under one duplicate name (a
    SyntaxError) and referenced undefined names; restored from the body.
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    # Threshold scales with file length.
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
    if count_config > threshold or count_test > threshold:
        return {"config_or_test": True}
    return {"config_or_test": False}
def UpperCAmelCase_ ( lowerCAmelCase_ ):
    """True when the content contains none of the basic Python constructs.

    Fixes the original's reference to the undefined name `example`.
    """
    keywords = ["def ", "class ", "for ", "while "]
    lines = lowerCAmelCase_["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def UpperCAmelCase_ ( example , minimum=4 ):
    """True when the content contains at most `minimum` '=' characters.

    The original declared duplicate parameter names (a SyntaxError);
    restored from the body's usage.
    """
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def UpperCAmelCase_ ( lowerCAmelCase_ ):
    """Characters-per-token ratio of the example's "content".

    Uses the module-level `tokenizer`. The original passed the example dict as
    the `truncation` argument and divided by the length of the example itself;
    restored to tokenize with truncation and divide by the token count.
    """
    input_ids = tokenizer(lowerCAmelCase_["content"], truncation=True)["input_ids"]
    ratio = len(lowerCAmelCase_["content"]) / len(input_ids)
    return {"ratio": ratio}
def UpperCAmelCase_ ( lowerCAmelCase_ ):
    """Run every per-example statistic/heuristic and merge them into one dict.

    Fixes the original, which built the dict under one name (`lowercase`) but
    updated and returned the undefined name `results`.
    NOTE(review): the helpers (`get_hash`, `line_stats`, ...) must resolve at
    module level — confirm against the rest of the file.
    """
    results = {}
    results.update(get_hash(lowerCAmelCase_))
    results.update(line_stats(lowerCAmelCase_))
    results.update(alpha_stats(lowerCAmelCase_))
    results.update(char_token_ratio(lowerCAmelCase_))
    results.update(is_autogenerated(lowerCAmelCase_))
    results.update(is_config_or_test(lowerCAmelCase_))
    results.update(has_no_keywords(lowerCAmelCase_))
    results.update(has_few_assignments(lowerCAmelCase_))
    return results
def UpperCAmelCase_ ( example , uniques , args ):
    """Keep an example only if it is unique and passes every quality heuristic.

    The original declared three parameters under one duplicate name (a
    SyntaxError); names restored from the body's usage. Config/test and
    keyword-free files are dropped stochastically with probability
    `args.filter_proba`.
    """
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True
def UpperCAmelCase_ ( lowerCAmelCase_ ):
    """Gzip-compress `lowerCAmelCase_` to `<path>.gz` and delete the original file.

    Fixes the original, which passed the path object to `shutil.copyfileobj`
    twice instead of streaming from the open source handle into the gzip
    handle.
    """
    with open(lowerCAmelCase_, "rb") as f_in:
        with gzip.open(str(lowerCAmelCase_) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(lowerCAmelCase_)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# NOTE(review): `preprocess`, `filter` and `compress_file` below must resolve
# to the helper functions defined earlier in this file — confirm the names.

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
| 310 |
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator():
    """Yield Fibonacci numbers 1, 2, 3, 5, 8, ... indefinitely.

    Restored from a version that assigned both loop variables to one shadowed
    temporary and then read the undefined names `a` and `b`. (This generator
    intentionally skips the duplicate leading 1, matching the solution below.)
    """
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def UpperCAmelCase_ ( lowerCAmelCase_ = 1000 ):
    """Project Euler 25: index of the first Fibonacci number with `lowerCAmelCase_` digits.

    Counts terms with fewer digits, then adds one for the 1-based offset of
    the generator above.
    """
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < lowerCAmelCase_:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(UpperCAmelCase_(int(str(input()).strip())))
| 310 | 1 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
UpperCamelCase = logging.get_logger(__name__)
class __UpperCAmelCase (_UpperCAmelCase ):
    '''TVLT audio feature extractor: waveform -> padded log-mel spectrogram patches.

    NOTE(review): restored from an obfuscated version in which every method
    parameter shared one duplicate name (a SyntaxError), all attribute writes
    went to a local temporary instead of `self`, and the required
    `model_input_names` attribute was name-mangled away. Restored
    keyword/attribute names should be confirmed against the upstream TVLT
    feature extractor.
    '''

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2_048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44_100,
        hop_length_to_sampling_rate=86,
        n_fft=2_048,
        padding_value=0.0,
        **kwargs,
    ):
        '''Configure spectrogram geometry, STFT parameters and the mel filter bank.'''
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        # Number of frequency patches per time frame.
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22_050.0,
            sampling_rate=sampling_rate,
            norm="""slaney""",
            mel_scale="""slaney""",
        ).T

    def _np_extract_fbank_features(self, waveform):
        '''Compute a dB-scaled log-mel spectrogram, rescaled into [-1, 1].'''
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, """hann"""),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="""dB""",
            db_range=80.0,
        )
        # Drop the trailing frame, then map the 80 dB range onto [-1, 1].
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ):
        '''Featurize one waveform or a batch of waveforms into padded spectrograms.

        Returns a BatchFeature with "audio_values" (and "audio_mask" marking the
        valid patches when `return_attention_mask` is set).
        Raises ValueError on a sampling-rate mismatch or multi-channel input.
        '''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    """This feature extractor is set to support sampling rate"""
                    F' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
                    F' with {self.sampling_rate} and not {sampling_rate}.')
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""")

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(F'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
        else:
            data = {"""audio_values""": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
| 712 |
# Base of the rolling hash (size of the byte alphabet).
_ALPHABET_SIZE = 256
# Modulus to hash a string; kept under the original module name, whose final
# binding in the obfuscated file was this value.
UpperCamelCase = 1_000_003


def rabin_karp(pattern, text):
    """Return True iff `pattern` occurs in `text`, via a Rabin–Karp rolling hash.

    Restored from a version whose two parameters shared one duplicate name
    (a SyntaxError) and whose body read undefined names.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Hash of the pattern and of the first window of the text.
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * _ALPHABET_SIZE) % UpperCamelCase
        text_hash = (ord(text[i]) + text_hash * _ALPHABET_SIZE) % UpperCamelCase
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * _ALPHABET_SIZE) % UpperCamelCase

    for i in range(0, t_len - p_len + 1):
        # Hash equality is only a candidate match; confirm with a real compare.
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * _ALPHABET_SIZE
            + ord(text[i + p_len])
        ) % UpperCamelCase
    return False


def __lowerCamelCase():
    """Self-test with the original five pattern/text cases."""
    pattern = "abc1abc12"
    text_ok = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_bad = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_ok) and not rabin_karp(pattern, text_bad)
    # Test 2)
    assert rabin_karp("ABABX", "ABABZABABYABABX")
    # Test 3)
    assert rabin_karp("AAAB", "ABAAAAAB")
    # Test 4)
    assert rabin_karp("abcdabcy", "abcxabcdabxabcdabcdabcy")
    # Test 5)
    assert rabin_karp("Lü", "Lüsai")
    assert not rabin_karp("Lue", "Lüsai")
    print("Success.")


if __name__ == "__main__":
    __lowerCamelCase()
| 569 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __snake_case ( unittest.TestCase ):
    """Integration tests checking XLM-R hidden states against reference values.

    NOTE(review): both test methods share the name `_SCREAMING_SNAKE_CASE`, so
    the second definition shadows the first at class-creation time (and
    neither is auto-discovered by unittest); names kept for compatibility.
    Restored from a version that used the undefined name `snake_case` as the
    model input, the expected shape, and the reference tensor.
    """

    @slow
    def _SCREAMING_SNAKE_CASE ( self ):
        """xlm-roberta-base: check output shape and a last-dim slice."""
        model = XLMRobertaModel.from_pretrained("""xlm-roberta-base""")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["""last_hidden_state"""].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def _SCREAMING_SNAKE_CASE ( self ):
        """xlm-roberta-large: check output shape and a last-dim slice."""
        model = XLMRobertaModel.from_pretrained("""xlm-roberta-large""")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["""last_hidden_state"""].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 336 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: maps submodule name -> public symbols. The original
# rebound every piece to one shadowed temporary (`lowercase`) and then passed
# the undefined name `_import_structure` to _LazyModule; restored here so the
# optional-dependency branches actually extend the structure.
_import_structure = {
    "configuration_blenderbot_small": [
        "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotSmallConfig",
        "BlenderbotSmallOnnxConfig",
    ],
    "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot_small"] = [
        "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotSmallForCausalLM",
        "BlenderbotSmallForConditionalGeneration",
        "BlenderbotSmallModel",
        "BlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
        "TFBlenderbotSmallForConditionalGeneration",
        "TFBlenderbotSmallModel",
        "TFBlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
        "FlaxBlenderbotSmallForConditionalGeneration",
        "FlaxBlenderbotSmallModel",
        "FlaxBlenderbotSmallPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_blenderbot_small import (
        BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotSmallConfig,
        BlenderbotSmallOnnxConfig,
    )
    from .tokenization_blenderbot_small import BlenderbotSmallTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot_small import (
            BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotSmallForCausalLM,
            BlenderbotSmallForConditionalGeneration,
            BlenderbotSmallModel,
            BlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot_small import (
            TFBlenderbotSmallForConditionalGeneration,
            TFBlenderbotSmallModel,
            TFBlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot_small import (
            FlaxBlenderbotSmallForConditionalGeneration,
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module so attribute access triggers the real imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 336 | 1 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class lowerCamelCase_(IterableDataset):
    '''Iterable dataset that concatenates tokenized texts into fixed-length
    token chunks of `seq_length`, separated by the tokenizer's BOS token.

    NOTE(review): restored from a version whose `__init__` wrote every
    attribute to one local temporary (so `self.*` was never set) and whose
    base class name was undefined; `IterableDataset` is imported at the top of
    the file and is the evident intended base.
    '''

    def __init__(self, tokenizer, dataset, seq_length=1_0_2_4, num_of_sequences=1_0_2_4, chars_per_token=3.6):
        self.tokenizer = tokenizer
        # Token inserted between concatenated documents.
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        # Number of raw characters to buffer before each tokenization pass.
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            # Fill a character buffer, then tokenize it in one batch call.
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)['''content'''])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)['''input_ids''']
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            # Emit only full-length chunks; the ragged tail is dropped.
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def lowerCAmelCase_( args ):
    """Build a streaming eval dataloader from `args` (dataset name, seq length, batch size).

    NOTE(review): `tokenizer` and `ConstantLengthDataset` must resolve at module
    level — confirm against the rest of the file. Restored from a version that
    unpacked keyword arguments from an undefined name.
    """
    ds_kwargs = {'''streaming''': True}
    train_data = load_dataset(args.dataset_name, split='''train''', **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, train_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def lowerCAmelCase_( args ):
    """Average loss and perplexity of the module-level `model` over `eval_dataloader`.

    NOTE(review): `model`, `eval_dataloader` and `accelerator` must resolve at
    module level — confirm against the rest of the file. Restored from a
    version that fed an undefined name to the model as both input and labels.
    """
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        # Repeat so gathered shards weight every device's batch equally.
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float('''inf''')
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
# NOTE(review): `create_dataloader` and `evaluate` must resolve to the helper
# functions defined above — confirm the names.
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info('''Evaluating and saving model after training''')
eval_loss, perplexity = evaluate(args)
logger.info(F"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
| 714 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence):
    """Print every subsequence of `sequence` via a DFS over include/exclude choices."""
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence, current_subsequence, index):
    """Recursive helper: at `index`, branch on excluding then including the element.

    Prints `current_subsequence` at each leaf (index == len(sequence)).
    Restored from a version with three parameters under one duplicate name
    (a SyntaxError) and a recursive call to an undefined function.
    """
    if index == len(sequence):
        print(current_subsequence)
        return
    # Branch 1: skip sequence[index].
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # Branch 2: include it, recurse, then backtrack.
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(['''A''', '''B''', '''C'''])
    generate_all_subsequences(seq)
| 623 | 0 |
"""simple docstring"""
import numpy as np
class Cell:
    """A* search node on the grid: position, parent link and f = g + h scores.

    NOTE(review): restored from a version in which both classes shared one
    name (the second shadowed the first) while the rest of the script
    referenced `Cell`, `Gridworld` and `astar`, and in which `__init__` wrote
    every attribute to a local temporary instead of `self`.
    """

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        # Cells compare by grid position only.
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    """2-D grid of shape `world_size`; exposes the 8-connected neighbourhood."""

    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """Return the in-bounds 8-neighbours of `cell`, each parent-linked to it."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    """Best-first search from `start` to `goal`; returns the path of positions.

    NOTE(review): the closed/open membership loops are kept as in the original
    (their `continue` statements are no-ops), so duplicates can enter the open
    list; the search still terminates on this grid.
    """
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            # Squared Euclidean distance heuristic.
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    # Walk parent links back to the start.
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(F"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
| 103 |
"""simple docstring"""
import operator
def snake_case ( arr , reverse = False , solution = None ) -> list:
    """Strand sort: repeatedly peel a monotone sublist off `arr` and merge it
    into `solution`; returns the sorted list (`arr` is consumed).

    Restored from a version whose three parameters shared one duplicate name
    (a SyntaxError) and whose recursive call targeted an undefined function.
    """
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    # Recurse on whatever the strand pass left behind.
    snake_case(arr, reverse, solution)
    return solution


if __name__ == "__main__":
    assert snake_case([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert snake_case([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 103 | 1 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = AudioLDMPipeline
UpperCAmelCase = TEXT_TO_AUDIO_PARAMS
UpperCAmelCase = TEXT_TO_AUDIO_BATCH_PARAMS
UpperCAmelCase = frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
def UpperCamelCase_ ( self : Optional[Any] ):
torch.manual_seed(0 )
_UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=(32, 64) , class_embed_type='''simple_projection''' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=_A , )
_UpperCamelCase = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_one=_A , )
torch.manual_seed(0 )
_UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
_UpperCamelCase = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
_UpperCamelCase = ClapTextModelWithProjection(_A )
_UpperCamelCase = RobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-roberta''' , model_max_length=77 )
_UpperCamelCase = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=_A , )
_UpperCamelCase = SpeechTaHifiGan(_A )
_UpperCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''vocoder''': vocoder,
}
return components
def UpperCamelCase_ ( self : Optional[int] , _A : int , _A : Optional[int]=0 ):
if str(_A ).startswith('''mps''' ):
_UpperCamelCase = torch.manual_seed(_A )
else:
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(_A )
_UpperCamelCase = {
'''prompt''': '''A hammer hitting a wooden surface''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
}
return inputs
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = AudioLDMPipeline(**_A )
_UpperCamelCase = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = self.get_dummy_inputs(_A )
_UpperCamelCase = audioldm_pipe(**_A )
_UpperCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(_A ) == 256
_UpperCamelCase = audio[:10]
_UpperCamelCase = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = AudioLDMPipeline(**_A )
_UpperCamelCase = audioldm_pipe.to(_A )
_UpperCamelCase = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = self.get_dummy_inputs(_A )
_UpperCamelCase = 3 * [inputs['''prompt''']]
# forward
_UpperCamelCase = audioldm_pipe(**_A )
_UpperCamelCase = output.audios[0]
_UpperCamelCase = self.get_dummy_inputs(_A )
_UpperCamelCase = 3 * [inputs.pop('''prompt''' )]
_UpperCamelCase = audioldm_pipe.tokenizer(
_A , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_A , return_tensors='''pt''' , )
_UpperCamelCase = text_inputs['''input_ids'''].to(_A )
_UpperCamelCase = audioldm_pipe.text_encoder(
_A , )
_UpperCamelCase = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_UpperCamelCase = F.normalize(_A , dim=-1 )
_UpperCamelCase = prompt_embeds
# forward
_UpperCamelCase = audioldm_pipe(**_A )
_UpperCamelCase = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = AudioLDMPipeline(**_A )
_UpperCamelCase = audioldm_pipe.to(_A )
_UpperCamelCase = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = self.get_dummy_inputs(_A )
_UpperCamelCase = 3 * ['''this is a negative prompt''']
_UpperCamelCase = negative_prompt
_UpperCamelCase = 3 * [inputs['''prompt''']]
# forward
_UpperCamelCase = audioldm_pipe(**_A )
_UpperCamelCase = output.audios[0]
_UpperCamelCase = self.get_dummy_inputs(_A )
_UpperCamelCase = 3 * [inputs.pop('''prompt''' )]
_UpperCamelCase = []
for p in [prompt, negative_prompt]:
_UpperCamelCase = audioldm_pipe.tokenizer(
_A , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_A , return_tensors='''pt''' , )
_UpperCamelCase = text_inputs['''input_ids'''].to(_A )
_UpperCamelCase = audioldm_pipe.text_encoder(
_A , )
_UpperCamelCase = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_UpperCamelCase = F.normalize(_A , dim=-1 )
embeds.append(_A )
_UpperCamelCase , _UpperCamelCase = embeds
# forward
_UpperCamelCase = audioldm_pipe(**_A )
_UpperCamelCase = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = PNDMScheduler(skip_prk_steps=_A )
_UpperCamelCase = AudioLDMPipeline(**_A )
_UpperCamelCase = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = self.get_dummy_inputs(_A )
_UpperCamelCase = '''egg cracking'''
_UpperCamelCase = audioldm_pipe(**_A , negative_prompt=_A )
_UpperCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(_A ) == 256
_UpperCamelCase = audio[:10]
_UpperCamelCase = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = PNDMScheduler(skip_prk_steps=_A )
_UpperCamelCase = AudioLDMPipeline(**_A )
_UpperCamelCase = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = '''A hammer hitting a wooden surface'''
# test num_waveforms_per_prompt=1 (default)
_UpperCamelCase = audioldm_pipe(_A , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
_UpperCamelCase = 2
_UpperCamelCase = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
_UpperCamelCase = 2
_UpperCamelCase = audioldm_pipe(_A , num_inference_steps=2 , num_waveforms_per_prompt=_A ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
_UpperCamelCase = 2
_UpperCamelCase = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=_A ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = AudioLDMPipeline(**_A )
_UpperCamelCase = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = audioldm_pipe.vocoder.config.sampling_rate
_UpperCamelCase = self.get_dummy_inputs(_A )
_UpperCamelCase = audioldm_pipe(audio_length_in_s=0.016 , **_A )
_UpperCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(_A ) / vocoder_sampling_rate == 0.016
_UpperCamelCase = audioldm_pipe(audio_length_in_s=0.032 , **_A )
_UpperCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(_A ) / vocoder_sampling_rate == 0.032
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = AudioLDMPipeline(**_A )
_UpperCamelCase = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = ['''hey''']
_UpperCamelCase = audioldm_pipe(_A , num_inference_steps=1 )
_UpperCamelCase = output.audios.shape
assert audio_shape == (1, 256)
_UpperCamelCase = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
_UpperCamelCase = SpeechTaHifiGan(_A ).to(_A )
_UpperCamelCase = audioldm_pipe(_A , num_inference_steps=1 )
_UpperCamelCase = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def UpperCamelCase_ ( self : Optional[int] ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_A )
def UpperCamelCase_ ( self : str ):
self._test_inference_batch_single_identical(test_mean_pixel_difference=_A )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def UpperCamelCase_ ( self : Union[str, Any] ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_A )
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Optional[int] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : List[str] , _A : Union[str, Any] , _A : List[str]="cpu" , _A : Any=torch.floataa , _A : Dict=0 ):
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(_A )
_UpperCamelCase = np.random.RandomState(_A ).standard_normal((1, 8, 128, 16) )
_UpperCamelCase = torch.from_numpy(_A ).to(device=_A , dtype=_A )
_UpperCamelCase = {
'''prompt''': '''A hammer hitting a wooden surface''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 2.5,
}
return inputs
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
_UpperCamelCase = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = self.get_inputs(_A )
_UpperCamelCase = 25
_UpperCamelCase = audioldm_pipe(**_A ).audios[0]
assert audio.ndim == 1
assert len(_A ) == 8_1920
_UpperCamelCase = audio[7_7230:7_7240]
_UpperCamelCase = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
_UpperCamelCase = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
_UpperCamelCase = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
_UpperCamelCase = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = self.get_inputs(_A )
_UpperCamelCase = audioldm_pipe(**_A ).audios[0]
assert audio.ndim == 1
assert len(_A ) == 8_1920
_UpperCamelCase = audio[2_7780:2_7790]
_UpperCamelCase = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
_UpperCamelCase = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
| 721 | from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowerCAmelCase = logging.get_logger(__name__)
# General docstring
_lowerCAmelCase = "RegNetConfig"
# Base docstring
_lowerCAmelCase = "facebook/regnet-y-040"
_lowerCAmelCase = [1, 1_088, 7, 7]
# Image classification docstring
_lowerCAmelCase = "facebook/regnet-y-040"
_lowerCAmelCase = "tabby, tabby cat"
_lowerCAmelCase = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : str , _A : int , _A : int = 3 , _A : int = 1 , _A : int = 1 , _A : Optional[str] = "relu" , **_A : Any , ):
super().__init__(**_A )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
_UpperCamelCase = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
_UpperCamelCase = tf.keras.layers.ConvaD(
filters=_A , kernel_size=_A , strides=_A , padding='''VALID''' , groups=_A , use_bias=_A , name='''convolution''' , )
_UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
_UpperCamelCase = ACTaFN[activation] if activation is not None else tf.identity
def UpperCamelCase_ ( self : Any , _A : Any ):
_UpperCamelCase = self.convolution(self.padding(_A ) )
_UpperCamelCase = self.normalization(_A )
_UpperCamelCase = self.activation(_A )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : Optional[Any] , _A : RegNetConfig , **_A : Any ):
super().__init__(**_A )
_UpperCamelCase = config.num_channels
_UpperCamelCase = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def UpperCamelCase_ ( self : List[str] , _A : Optional[int] ):
_UpperCamelCase = shape_list(_A )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
_UpperCamelCase = tf.transpose(_A , perm=(0, 2, 3, 1) )
_UpperCamelCase = self.embedder(_A )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : str , _A : int , _A : int = 2 , **_A : Optional[Any] ):
super().__init__(**_A )
_UpperCamelCase = tf.keras.layers.ConvaD(
filters=_A , kernel_size=1 , strides=_A , use_bias=_A , name='''convolution''' )
_UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
def UpperCamelCase_ ( self : str , _A : tf.Tensor , _A : bool = False ):
return self.normalization(self.convolution(_A ) , training=_A )
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : Dict , _A : int , _A : int , **_A : Dict ):
super().__init__(**_A )
_UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A , name='''pooler''' )
_UpperCamelCase = [
tf.keras.layers.ConvaD(filters=_A , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
tf.keras.layers.ConvaD(filters=_A , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
]
def UpperCamelCase_ ( self : List[str] , _A : List[Any] ):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
_UpperCamelCase = self.pooler(_A )
for layer_module in self.attention:
_UpperCamelCase = layer_module(_A )
_UpperCamelCase = hidden_state * pooled
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 , **_A : str ):
super().__init__(**_A )
_UpperCamelCase = in_channels != out_channels or stride != 1
_UpperCamelCase = max(1 , out_channels // config.groups_width )
_UpperCamelCase = (
TFRegNetShortCut(_A , stride=_A , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
_UpperCamelCase = [
TFRegNetConvLayer(_A , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
_A , stride=_A , groups=_A , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetConvLayer(_A , kernel_size=1 , activation=_A , name='''layer.2''' ),
]
_UpperCamelCase = ACTaFN[config.hidden_act]
def UpperCamelCase_ ( self : Dict , _A : Tuple ):
_UpperCamelCase = hidden_state
for layer_module in self.layers:
_UpperCamelCase = layer_module(_A )
_UpperCamelCase = self.shortcut(_A )
hidden_state += residual
_UpperCamelCase = self.activation(_A )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 , **_A : int ):
super().__init__(**_A )
_UpperCamelCase = in_channels != out_channels or stride != 1
_UpperCamelCase = max(1 , out_channels // config.groups_width )
_UpperCamelCase = (
TFRegNetShortCut(_A , stride=_A , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
_UpperCamelCase = [
TFRegNetConvLayer(_A , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
_A , stride=_A , groups=_A , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetSELayer(_A , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
TFRegNetConvLayer(_A , kernel_size=1 , activation=_A , name='''layer.3''' ),
]
_UpperCamelCase = ACTaFN[config.hidden_act]
def UpperCamelCase_ ( self : Tuple , _A : List[Any] ):
_UpperCamelCase = hidden_state
for layer_module in self.layers:
_UpperCamelCase = layer_module(_A )
_UpperCamelCase = self.shortcut(_A )
hidden_state += residual
_UpperCamelCase = self.activation(_A )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : Tuple , _A : RegNetConfig , _A : int , _A : int , _A : int = 2 , _A : int = 2 , **_A : Union[str, Any] ):
super().__init__(**_A )
_UpperCamelCase = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
_UpperCamelCase = [
# downsampling is done in the first layer with stride of 2
layer(_A , _A , _A , stride=_A , name='''layers.0''' ),
*[layer(_A , _A , _A , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def UpperCamelCase_ ( self : Union[str, Any] , _A : Optional[int] ):
for layer_module in self.layers:
_UpperCamelCase = layer_module(_A )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , _A : RegNetConfig , **_A : List[str] ):
super().__init__(**_A )
_UpperCamelCase = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
_A , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
_UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(_A , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(_A , _A , _A , depth=_A , name=F"""stages.{i+1}""" ) )
def UpperCamelCase_ ( self : Optional[int] , _A : tf.Tensor , _A : bool = False , _A : bool = True ):
_UpperCamelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_UpperCamelCase = hidden_states + (hidden_state,)
_UpperCamelCase = stage_module(_A )
if output_hidden_states:
_UpperCamelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=_A , hidden_states=_A )
@keras_serializable
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
UpperCAmelCase = RegNetConfig
def __init__( self : int , _A : Tuple , **_A : int ):
super().__init__(**_A )
_UpperCamelCase = config
_UpperCamelCase = TFRegNetEmbeddings(_A , name='''embedder''' )
_UpperCamelCase = TFRegNetEncoder(_A , name='''encoder''' )
_UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A , name='''pooler''' )
@unpack_inputs
def UpperCamelCase_ ( self : Optional[int] , _A : tf.Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : bool = False , ):
_UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase = self.embedder(_A , training=_A )
_UpperCamelCase = self.encoder(
_A , output_hidden_states=_A , return_dict=_A , training=_A )
_UpperCamelCase = encoder_outputs[0]
_UpperCamelCase = self.pooler(_A )
# Change to NCHW output format have uniformity in the modules
_UpperCamelCase = tf.transpose(_A , perm=(0, 3, 1, 2) )
_UpperCamelCase = tf.transpose(_A , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
_UpperCamelCase = tuple([tf.transpose(_A , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_A , pooler_output=_A , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = RegNetConfig
UpperCAmelCase = "regnet"
UpperCAmelCase = "pixel_values"
@property
def UpperCamelCase_ ( self : Tuple ):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
_lowerCAmelCase = r"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowerCAmelCase = r"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top.", __lowercase, )
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : List[Any] , _A : RegNetConfig , *_A : Optional[int] , **_A : Tuple ):
super().__init__(_A , *_A , **_A )
_UpperCamelCase = TFRegNetMainLayer(_A , name='''regnet''' )
@unpack_inputs
@add_start_docstrings_to_model_forward(_A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_A , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase_ ( self : Any , _A : tf.Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : Optional[int]=False , ):
_UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase = self.regnet(
pixel_values=_A , output_hidden_states=_A , return_dict=_A , training=_A , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ", __lowercase, )
class lowerCAmelCase_ ( __lowercase, __lowercase ):
def __init__( self : List[Any] , _A : RegNetConfig , *_A : Any , **_A : int ):
super().__init__(_A , *_A , **_A )
_UpperCamelCase = config.num_labels
_UpperCamelCase = TFRegNetMainLayer(_A , name='''regnet''' )
# classification head
_UpperCamelCase = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(_A )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase_ ( self : str , _A : tf.Tensor = None , _A : tf.Tensor = None , _A : bool = None , _A : bool = None , _A : Any=False , ):
_UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase = self.regnet(
_A , output_hidden_states=_A , return_dict=_A , training=_A )
_UpperCamelCase = outputs.pooler_output if return_dict else outputs[1]
_UpperCamelCase = self.classifier[0](_A )
_UpperCamelCase = self.classifier[1](_A )
_UpperCamelCase = None if labels is None else self.hf_compute_loss(labels=_A , logits=_A )
if not return_dict:
_UpperCamelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=_A , logits=_A , hidden_states=outputs.hidden_states )
| 71 | 0 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 283 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : Optional[int] = GPTSanJapaneseTokenizer
a : Optional[Any] = False
a : List[str] = {'do_clean_text': False, 'add_prefix_space': False}
def UpperCAmelCase ( self : Tuple ) -> Any:
super().setUp()
# fmt: off
__UpperCAmelCase : Tuple = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
__UpperCAmelCase : Dict = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
__UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""}
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(__lowercase ) )
def UpperCAmelCase ( self : Tuple , **__lowercase : int ) -> Any:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__lowercase )
def UpperCAmelCase ( self : str , __lowercase : Union[str, Any] ) -> Any:
__UpperCAmelCase : Any = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
__UpperCAmelCase : int = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def UpperCAmelCase ( self : List[Any] , __lowercase : Optional[int] ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase : int = self.get_input_output_texts(__lowercase )
__UpperCAmelCase : Tuple = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
__UpperCAmelCase : Dict = tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase )
return text, ids
def UpperCAmelCase ( self : int ) -> Optional[Any]:
pass # TODO add if relevant
def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
pass # TODO add if relevant
def UpperCAmelCase ( self : Dict ) -> Tuple:
pass # TODO add if relevant
def UpperCAmelCase ( self : str ) -> Tuple:
__UpperCAmelCase : List[str] = self.get_tokenizer()
# Testing tokenization
__UpperCAmelCase : int = """こんにちは、世界。 こんばんは、㔺界。"""
__UpperCAmelCase : Dict = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
__UpperCAmelCase : Optional[Any] = tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
# Testing conversion to ids without special tokens
__UpperCAmelCase : List[str] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
__UpperCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
# Testing conversion to ids with special tokens
__UpperCAmelCase : List[Any] = tokens + [tokenizer.unk_token]
__UpperCAmelCase : str = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
__UpperCAmelCase : Any = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
def UpperCAmelCase ( self : Tuple ) -> Dict:
__UpperCAmelCase : int = self.get_tokenizer()
# Testing tokenization
__UpperCAmelCase : Tuple = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
__UpperCAmelCase : int = """こんにちは、、、、世界。こんばんは、、、、世界。"""
__UpperCAmelCase : Tuple = tokenizer.encode(__lowercase )
__UpperCAmelCase : int = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : int ) -> Optional[int]:
__UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
__UpperCAmelCase : List[Any] = """こんにちは、世界。"""
__UpperCAmelCase : Optional[int] = """こんばんは、㔺界。😀"""
__UpperCAmelCase : List[Any] = """こんにちは、世界。こんばんは、世界。😀"""
__UpperCAmelCase : List[str] = tokenizer.encode(prefix_text + input_text )
__UpperCAmelCase : List[Any] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
__UpperCAmelCase : Any = tokenizer.encode(__lowercase , prefix_text=__lowercase )
__UpperCAmelCase : Optional[int] = tokenizer.decode(__lowercase )
__UpperCAmelCase : Any = tokenizer.decode(__lowercase )
__UpperCAmelCase : Optional[Any] = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : Any ) -> str:
__UpperCAmelCase : int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
__UpperCAmelCase : int = """こんにちは、世界。"""
__UpperCAmelCase : List[Any] = """こんばんは、㔺界。😀"""
__UpperCAmelCase : Union[str, Any] = len(tokenizer.encode(__lowercase ) ) - 2
__UpperCAmelCase : int = len(tokenizer.encode(__lowercase ) ) - 2
__UpperCAmelCase : List[Any] = [1] + [0] * (len_prefix + len_text + 1)
__UpperCAmelCase : Union[str, Any] = [1] * (len_prefix + len_text + 1) + [0]
__UpperCAmelCase : List[Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
__UpperCAmelCase : Union[str, Any] = tokenizer(prefix_text + input_text ).token_type_ids
__UpperCAmelCase : Optional[Any] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
__UpperCAmelCase : Tuple = tokenizer(__lowercase , prefix_text=__lowercase ).token_type_ids
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : List[str] ) -> int:
__UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
__UpperCAmelCase : Optional[int] = tokenizer.encode("""あンいワ""" )
__UpperCAmelCase : Tuple = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
__UpperCAmelCase : Optional[int] = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
self.assertNotEqual(__lowercase , __lowercase )
self.assertNotEqual(__lowercase , __lowercase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
__UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
__UpperCAmelCase : List[Any] = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
__UpperCAmelCase : int = tokenizer(__lowercase , padding=__lowercase )
__UpperCAmelCase : Optional[Any] = tokenizer.batch_encode_plus(__lowercase , padding=__lowercase )
# fmt: off
__UpperCAmelCase : Optional[int] = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
__UpperCAmelCase : Tuple = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
__UpperCAmelCase : Union[str, Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __lowercase )
self.assertListEqual(x_token.token_type_ids , __lowercase )
self.assertListEqual(x_token.attention_mask , __lowercase )
self.assertListEqual(x_token_a.input_ids , __lowercase )
self.assertListEqual(x_token_a.token_type_ids , __lowercase )
self.assertListEqual(x_token_a.attention_mask , __lowercase )
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def UpperCAmelCase ( self : Any ) -> int:
# tokenizer has no padding token
pass
| 63 | 0 |
from ..utils import DummyObject, requires_backends
class UpperCamelCase__(metaclass=DummyObject):
    """Placeholder class used when ``torch``/``torchsde`` are not installed.

    Every access raises via ``requires_backends`` instead of failing with an
    ImportError at import time. ``DummyObject`` (imported above) reads the
    ``_backends`` attribute, so the mangled ``_SCREAMING_SNAKE_CASE`` name is
    restored here.
    """

    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
| 326 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
lowercase__ ='true'
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Build a regression model, its DDP-prepared copy, and a dataloader.

    Returns ``(model, ddp_model, dataloader)``; only the copy and the loader go
    through ``accelerator.prepare`` so the raw model serves as a baseline.
    """
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    """Return an evaluation DataLoader over the GLUE/MRPC validation split.

    ``use_longest`` switches the collation between dynamic ("longest") padding
    and fixed max_length=128 padding.
    """
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        # assumes truncation=True / max_length=None as in the upstream script — TODO confirm
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    """Prepare baseline ("no") and accelerator-prepared ("ddp") MRPC pipelines.

    Returns ``({"ddp": [...], "no": [...]}, accelerator)`` where each entry is
    ``[model, dataloader, device]``.
    """
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    """Run *model* over *dataloader* and return gathered (logits, targets).

    Each batch is expected to yield exactly two values (inputs, targets); the
    pair is routed through ``accelerator.gather_for_metrics`` so distributed
    duplicates are dropped, then concatenated along dim 0.
    """
    logits_and_targets = []
    for batch in dataloader:
        inputs, target = batch.values()
        with torch.no_grad():
            logit = model(inputs)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    """Check gather_for_metrics returns exactly ``num_samples`` predictions."""
    _, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    """Verify distributed metric gathering matches a single-process baseline.

    Computes GLUE/MRPC accuracy and F1 twice — once on the raw model and once
    through ``accelerator.gather_for_metrics`` — and asserts both agree.
    """
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    """Run metric-gathering correctness checks across batch configurations."""
    # assumes the initial accelerator is built with both flags False — TODO confirm
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    """Entry point used by xla_spawn (TPUs); *index* is the process ordinal."""
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 326 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding: declare the public structure, then either import for
# type checkers or install a _LazyModule that resolves names on first access.
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only exposed when torch is installed.
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]

if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 250 |
import itertools
import math
def is_prime(number: int) -> bool:
    """Return True if *number* is prime.

    Uses trial division over candidates of the form 6k +/- 1 after handling
    the small cases, since all primes > 3 have that form.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All remaining primes are of the form 6k +/- 1
    for i in range(5, math.isqrt(number) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator():
    """Yield the prime numbers in increasing order: 2, 3, 5, 7, ..."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def solution(nth: int = 10_001) -> int:
    """Return the *nth* prime number (Project Euler problem 7)."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f'{solution() = }')
| 2 | 0 |
'''simple docstring'''
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    """Lomuto-style partition of ``a[left_index:right_index]`` around the
    element at ``left_index``; returns the pivot's final index.

    Elements smaller than the pivot end up to its left, the rest to its right.
    The list is rearranged in place.
    """
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    # Place the pivot between the two partitions.
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1
def quick_sort_random(a: list, left: int, right: int) -> None:
    """Sort ``a[left:right]`` in place with quicksort using a random pivot."""
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point
def main() -> None:
    """Read comma-separated integers from stdin, sort them, and print the result."""
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
| 700 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
    """Fast (tiny-model) tests for ``StableUnCLIPPipeline``.

    NOTE(review): identifiers here were mangled by an automated rewrite — the
    base classes ``lowerCamelCase_`` are presumably the three pipeline tester
    mixins imported above, the repeated ``__SCREAMING_SNAKE_CASE`` class
    attributes were presumably ``pipeline_class`` / ``params`` /
    ``batch_params`` / ``image_params`` / ``image_latents_params`` /
    ``test_xformers_attention``, and local assignments lost their names, so
    references such as ``embedder_hidden_size`` or ``components`` are currently
    unresolved. Confirm against the upstream diffusers test before running.
    """

    __SCREAMING_SNAKE_CASE = StableUnCLIPPipeline
    __SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_PARAMS
    __SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_BATCH_PARAMS
    __SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_IMAGE_PARAMS
    __SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    __SCREAMING_SNAKE_CASE = False

    def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
        """Build tiny prior + denoiser components for the pipeline under test.

        All modules are seeded with ``torch.manual_seed(0)`` so the components
        are deterministic across runs.
        """
        UpperCamelCase : Union[str, Any] = 32
        # NOTE(review): RHS names below (embedder_hidden_size, prior_tokenizer,
        # ...) refer to the assignments above whose targets were mangled.
        UpperCamelCase : List[str] = embedder_hidden_size
        # prior components

        torch.manual_seed(0 )
        UpperCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        torch.manual_seed(0 )
        UpperCamelCase : Tuple = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCamelCase , projection_dim=lowerCamelCase , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
        torch.manual_seed(0 )
        UpperCamelCase : Dict = PriorTransformer(
            num_attention_heads=2 , attention_head_dim=12 , embedding_dim=lowerCamelCase , num_layers=1 , )
        torch.manual_seed(0 )
        UpperCamelCase : str = DDPMScheduler(
            variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=10_00 , clip_sample=lowerCamelCase , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
        # regular denoising components

        torch.manual_seed(0 )
        UpperCamelCase : Dict = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase )
        UpperCamelCase : Tuple = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
        torch.manual_seed(0 )
        UpperCamelCase : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        torch.manual_seed(0 )
        UpperCamelCase : Optional[Any] = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
        torch.manual_seed(0 )
        UpperCamelCase : Optional[Any] = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCamelCase , layers_per_block=1 , upcast_attention=lowerCamelCase , use_linear_projection=lowerCamelCase , )
        torch.manual_seed(0 )
        UpperCamelCase : Tuple = DDIMScheduler(
            beta_schedule="scaled_linear" , beta_start=0.00085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCamelCase , steps_offset=1 , )
        torch.manual_seed(0 )
        UpperCamelCase : Union[str, Any] = AutoencoderKL()
        UpperCamelCase : Dict = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
        return components

    def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase=0 ) -> int:
        """Build deterministic dummy pipeline inputs for a given device/seed."""
        if str(lowerCamelCase ).startswith("mps" ):
            # MPS does not support device-bound generators.
            UpperCamelCase : Tuple = torch.manual_seed(lowerCamelCase )
        else:
            UpperCamelCase : str = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
        UpperCamelCase : Any = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
        """Attention-slicing forward pass; exact diffs only checked on CPU."""
        UpperCamelCase : Union[str, Any] = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase )

    def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
        """Batched-vs-single inference; exact diffs only checked on CPU/MPS."""
        UpperCamelCase : List[str] = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase )
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
    """Slow GPU integration tests for ``StableUnCLIPPipeline``.

    NOTE(review): local names were mangled, so references such as ``pipe``,
    ``output`` and ``mem_bytes`` are currently unresolved; the first method
    presumably was ``tearDown`` (it calls ``super().tearDown()``).
    """

    def SCREAMING_SNAKE_CASE__ ( self ) -> str:
        """Release GPU memory after each test."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
        """Full text-to-image run compared against a reference numpy image."""
        UpperCamelCase : List[Any] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
        UpperCamelCase : Optional[int] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
        pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        UpperCamelCase : Tuple = torch.Generator(device="cpu" ).manual_seed(0 )
        UpperCamelCase : Tuple = pipe("anime turle" , generator=lowerCamelCase , output_type="np" )
        UpperCamelCase : Union[str, Any] = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )

    def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
        """Peak-GPU-memory check with attention slicing + CPU offload enabled."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        UpperCamelCase : Any = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
        UpperCamelCase : Dict = pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        UpperCamelCase : Optional[Any] = pipe(
            "anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
        UpperCamelCase : Dict = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 435 | 0 |
'''simple docstring'''
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    """Return the photographic negative of *img*, inverting pixels in place.

    ``img`` is an H x W x 3 array as returned by ``imread``; every channel
    value ``v`` is replaced by ``255 - v``.
    """
    pixel_h, pixel_v = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
| 284 |
'''simple docstring'''
def snake_case ( snake_case : int , snake_case : int , snake_case : int ) -> int:
"""simple docstring"""
if exponent == 1:
return base
if exponent % 2 == 0:
lowerCAmelCase = _modexpt(snake_case , exponent // 2 , snake_case ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(snake_case , exponent - 1 , snake_case )) % modulo_value
def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last *digits* digits of the tetration ``base ↑↑ height``
    (Project Euler problem 188).

    Iteratively folds the power tower top-down: result starts at ``base`` and
    is raised ``height - 1`` times modulo ``10 ** digits``.
    """
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(F"""{solution() = }""")
| 284 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class SCREAMING_SNAKE_CASE__(PretrainedConfig):
    """Configuration for the Speech2Text2 decoder model.

    Parameter names were restored from the attribute assignments below and the
    upstream ``Speech2Text2Config`` (the scrambled source reused one name for
    every parameter, which is a SyntaxError).
    """

    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 63 |
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Pipeline output container (a ``BaseOutput`` dataclass).

    NOTE(review): the two field names and their annotations were destroyed by
    an automated rewrite (both collapsed to ``_lowerCAmelCase = 42``); they
    must be restored from the original source before this dataclass is usable.
    """

    _lowerCAmelCase = 42
    _lowerCAmelCase = 42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 63 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
_lowercase : List[str] = logging.get_logger(__name__)
_lowercase : Tuple = {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json',
'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json',
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'
),
}
class LongformerConfig(PretrainedConfig):
    """Configuration for Longformer models.

    Parameter names were restored from the attribute assignments below and the
    upstream ``LongformerConfig`` (the scrambled source reused one name for
    every parameter, a SyntaxError, and gave both classes in this module the
    same name so one shadowed the other).
    """

    model_type = "longformer"

    def __init__(
        self,
        attention_window: "Union[List[int], int]" = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    """ONNX export configuration for Longformer.

    Method/variable names were restored from the ``OnnxConfig`` interface (the
    scrambled source gave every method the same name ``a``, so only the last
    one survived).
    """

    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        # Export-specific code paths in the model are gated on this flag.
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the exported model's inputs."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            # presumably the pooler output — confirm against upstream export code
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: "Optional[TensorType]" = None,
    ):
        """Extend the base dummy inputs with a global attention mask."""
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
        return inputs
| 49 | """simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class a ( lowerCAmelCase_ ):
    """Integration test: fine-tune a tiny BERT2BERT encoder-decoder on a 1%
    slice of CNN/DailyMail through ``Seq2SeqTrainer``.

    NOTE(review): local variable names were mangled to ``_UpperCAmelCase`` by
    an automated rewrite, so later references such as ``bertabert``,
    ``tokenizer``, ``train_dataset``, ``batch`` and ``trainer`` are currently
    unresolved — the original bindings must be restored before this test can
    run.
    """

    @slow
    @require_torch
    def lowerCAmelCase_ ( self : str ):
        """End-to-end smoke test of Seq2SeqTrainer on a bert-tiny EncoderDecoder."""
        _UpperCAmelCase = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
        _UpperCAmelCase = BertTokenizer.from_pretrained("""bert-base-uncased""" )
        _UpperCAmelCase = bertabert.config.encoder.vocab_size
        _UpperCAmelCase = tokenizer.sep_token_id
        _UpperCAmelCase = tokenizer.cls_token_id
        _UpperCAmelCase = 128
        _UpperCAmelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
        _UpperCAmelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
        _UpperCAmelCase = train_dataset.select(range(32 ) )
        _UpperCAmelCase = val_dataset.select(range(16 ) )
        _UpperCAmelCase = 4

        def _map_to_encoder_decoder_inputs(__lowerCAmelCase : Dict ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            _UpperCAmelCase = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=__lowerCAmelCase , max_length=512 )
            _UpperCAmelCase = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=__lowerCAmelCase , max_length=128 )
            _UpperCAmelCase = inputs.input_ids
            _UpperCAmelCase = inputs.attention_mask
            _UpperCAmelCase = outputs.input_ids
            _UpperCAmelCase = outputs.input_ids.copy()
            # Mask pad tokens out of the loss with -100.
            _UpperCAmelCase = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
            ]
            _UpperCAmelCase = outputs.attention_mask
            assert all(len(__lowerCAmelCase ) == 512 for x in inputs.input_ids )
            assert all(len(__lowerCAmelCase ) == 128 for x in outputs.input_ids )
            return batch

        def _compute_metrics(__lowerCAmelCase : int ):
            # Exact-match accuracy between decoded predictions and labels.
            _UpperCAmelCase = pred.label_ids
            _UpperCAmelCase = pred.predictions
            # all unnecessary tokens are removed
            _UpperCAmelCase = tokenizer.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
            _UpperCAmelCase = tokenizer.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
            _UpperCAmelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__lowerCAmelCase ) )] ) / len(__lowerCAmelCase )
            return {"accuracy": accuracy}

        # map train dataset
        _UpperCAmelCase = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=__lowerCAmelCase , batch_size=__lowerCAmelCase , remove_columns=["""article""", """highlights"""] , )
        train_dataset.set_format(
            type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )

        # same for validation dataset
        _UpperCAmelCase = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=__lowerCAmelCase , batch_size=__lowerCAmelCase , remove_columns=["""article""", """highlights"""] , )
        val_dataset.set_format(
            type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )

        _UpperCAmelCase = self.get_auto_remove_tmp_dir()
        _UpperCAmelCase = SeqaSeqTrainingArguments(
            output_dir=__lowerCAmelCase , per_device_train_batch_size=__lowerCAmelCase , per_device_eval_batch_size=__lowerCAmelCase , predict_with_generate=__lowerCAmelCase , evaluation_strategy="""steps""" , do_train=__lowerCAmelCase , do_eval=__lowerCAmelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
        # instantiate trainer
        _UpperCAmelCase = SeqaSeqTrainer(
            model=__lowerCAmelCase , args=__lowerCAmelCase , compute_metrics=_compute_metrics , train_dataset=__lowerCAmelCase , eval_dataset=__lowerCAmelCase , tokenizer=__lowerCAmelCase , )
        # start training
        trainer.train()
import baseaa
def baseaa_encode(string: str) -> bytes:
    """Encode *string* (UTF-8) and return the Base32-encoded bytes.

    NOTE(review): the original called a mangled ``base64.b??encode``; Base32 is
    assumed here from the surviving call pattern — confirm against the file's
    history.
    """
    import base64  # local import: the module-level import line was corrupted

    return base64.b32encode(string.encode("utf-8"))
def baseaa_decode(encoded_bytes: bytes) -> str:
    """Decode Base32 *encoded_bytes* and return the UTF-8 string.

    NOTE(review): Base32 is assumed (the original decode call was mangled) —
    confirm against the file's history.
    """
    import base64  # local import: the module-level import line was corrupted

    return base64.b32decode(encoded_bytes).decode("utf-8")
if __name__ == "__main__":
    test = "Hello World!"
    encoded = baseaa_encode(test)
    print(encoded)

    decoded = baseaa_decode(encoded)
    print(decoded)
| 715 |
# Precomputed sum of squared decimal digits for every value 0..99999, so the
# digit-square-sum of a large number can be taken five digits at a time.
DIGITS_SQUARED = [sum(int(c) ** 2 for c in str(i)) for i in range(100_000)]
def next_number(number: int) -> int:
    """Return the sum of the squares of the decimal digits of *number*.

    Processes five digits per iteration via the precomputed DIGITS_SQUARED
    lookup table for speed.
    """
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list = [None] * 10_000_000
CHAINS[0] = True  # the chain starting at 1 ends in 1
CHAINS[57] = False  # the chain starting at 58 ends in 89
def chain(number: int) -> bool:
    """Return True if *number*'s digit-square chain ends in 1, False if in 89.

    Memoizes the answer in CHAINS (indexed by number - 1), and also caches the
    result for number * 10, * 100, ... since appending zeros does not change
    the digit-square sum.
    """
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain
def solution(number: int = 10_000_000) -> int:
    """Count the starting numbers below ``number`` whose digit-square chain
    arrives at 89 (Project Euler problem 92)."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    # False entries mark chains that end in 89.
    return CHAINS[:number].count(False)
if __name__ == "__main__":
    # Run module doctests, then print the Project Euler 92 answer.
    import doctest
    doctest.testmod()
    print(f'{solution() = }')
| 252 | 0 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
    """Builds a tiny DeBERTa-v2 config plus random inputs and runs
    shape-checking forward passes for every TF model head under test.

    Renamed from a garbled placeholder: the test class below instantiates
    ``TFDebertaVaModelTester(self)``, and each ``create_and_check_*`` /
    ``prepare_config_and_inputs*`` method is called by name from there, so
    the method names must be distinct (the originals all shared one name and
    shadowed each other).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create a small DebertaVaConfig plus random ids, masks and labels."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaVaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaVaModel(config=config)
        # Exercise both dict and list input conventions.
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaVaForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaVaForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common TF model/pipeline tests for DeBERTa-v2.

    Fixes garbled names: the base classes must be the imported
    ``TFModelTesterMixin``/``PipelineTesterMixin`` (the mixins read the
    ``all_model_classes``/``pipeline_model_mapping`` attributes below), and
    test methods need distinct ``test_*`` names for unittest discovery.
    """

    all_model_classes = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaVaModel,
            "fill-mask": TFDebertaVaForMaskedLM,
            "question-answering": TFDebertaVaForQuestionAnswering,
            "text-classification": TFDebertaVaForSequenceClassification,
            "token-classification": TFDebertaVaForTokenClassification,
            "zero-shot": TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDeBERTaVaModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the released deberta-v2-xlarge weights.

    Renamed from a garbled placeholder that collided with the test class
    above (shadowing it at module level); methods need ``test_*`` names for
    unittest discovery.
    """

    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        # Expected 3x3 slice of the last hidden state, verified against the
        # reference checkpoint.
        expected_slice = tf.constant(
            [[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
| 441 |
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
# Empty placeholder test class; @require_onnxruntime skips it when
# onnxruntime is not installed. NOTE(review): the name `__a` looks like a
# garbled auto-generated identifier — confirm the intended class name.
class __a :
    pass
"""simple docstring"""
import math
def jump_search(arr, x):
    """Search the ascending-sorted sequence *arr* for *x* using jump search.

    Jumps ahead in sqrt(n)-sized blocks to find the block that could contain
    *x*, then scans that block linearly.  Returns the index of *x*, or -1 if
    it is not present (including when *arr* is empty).
    """
    n = len(arr)
    if n == 0:  # guard: an empty sequence has no match
        return -1
    block_size = int(math.floor(math.sqrt(n)))  # invariant jump width
    step = block_size
    prev = 0
    # Advance block by block while the block's last element is still < x.
    while arr[min(step, n) - 1] < x:
        prev = step
        step += block_size
        if prev >= n:
            return -1
    # Linear scan inside the candidate block.
    while arr[prev] < x:
        prev += 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
    # Read a comma-separated sorted list and a target, then report its index.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f"Number {x} is at index {res}")
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

# Registry of formatter classes, keyed by format type (None = plain python).
_FORMAT_TYPES = {}  # type: Dict[Optional[str], Type[Formatter]]
# Maps alias strings (e.g. "np") to their canonical format type (e.g. "numpy").
_FORMAT_TYPES_ALIASES = {}  # type: Dict[Optional[str], str]
# Maps format types whose backend is not installed to the error to raise.
_FORMAT_TYPES_ALIASES_UNAVAILABLE = {}  # type: Dict[Optional[str], Exception]
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register ``formatter_cls`` for ``format_type`` and its aliases.

    Logs a warning when an existing registration (type or alias) is
    overwritten instead of silently replacing it.
    """
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    # The format type itself is always accepted as its own alias.
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Record the error to raise when ``format_type``'s backend is missing,
    for the type and each of its aliases."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

# Optional backends: register the formatter when the package is installed,
# otherwise register the error to raise on first use.
if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """Resolve an alias like "np" to its canonical format type ("numpy");
    unknown values are returned unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> "Formatter":
    """Instantiate the Formatter registered for ``format_type`` (or one of
    its aliases), forwarding ``format_kwargs`` to its constructor.

    Raises:
        The backend-specific error for known-but-unavailable format types.
        ValueError: for format types that were never registered.
    """
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'"
        )
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure for the Vivit model: submodule name -> public names.
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]

if TYPE_CHECKING:
    # Direct imports so static type checkers see the real symbols.
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 82 | """simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result, args):
    """Compute WER/CER over *result* and write them — plus optional
    per-example predictions/targets — to files named after the dataset.

    Args:
        result: mapped dataset with "prediction" and "target" columns.
        args: parsed CLI namespace (dataset/config/split/log_outputs).
    """
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metrics
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text files; possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output line by line
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """Lower-case *text*, strip punctuation, and collapse whitespace/newline
    runs so reference transcriptions match the training-time normalisation."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, normalize the target text, e.g. removing new line characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for token_sequence in token_sequences_to_ignore:
        text = " ".join(text.split(token_sequence))

    return text
def main(args):
    """Run the ASR pipeline over the requested dataset split and log WER/CER."""
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio to the model's expected sampling rate
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
__A = parser.parse_args()
main(args)
| 586 | 0 |
def nor_gate(input_1: int, input_2: int) -> int:
    """Return the NOR of the two binary inputs: 1 only when both are 0.

    The original had two parameters with the same (garbled) name, which is a
    SyntaxError; they must be distinct inputs.
    """
    return int(input_1 == input_2 == 0)
def main() -> None:
    """Print the truth table of the NOR gate (called by the __main__ guard)."""
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|    0    |    0    |   {nor_gate(0, 0)}    |")
    print(f"|    0    |    1    |   {nor_gate(0, 1)}    |")
    print(f"|    1    |    0    |   {nor_gate(1, 0)}    |")
    print(f"|    1    |    1    |   {nor_gate(1, 1)}    |")
if __name__ == "__main__":
    # Run module doctests, then print the NOR truth table.
    import doctest
    doctest.testmod()
    main()
| 716 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__lowerCamelCase = logging.getLogger(__name__)
class a__ ( lowerCAmelCase_ ):
lowerCamelCase__: str = """summarization"""
lowerCamelCase__: List[str] = ["""loss"""]
lowerCamelCase__: List[str] = ROUGE_KEYS
lowerCamelCase__: Union[str, Any] = """rouge2"""
def __init__( self : Union[str, Any] , lowerCamelCase_ : Tuple , **lowerCamelCase_ : List[str] ):
if hparams.sortish_sampler and hparams.gpus > 1:
a_ : Any = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" )
if hparams.sortish_sampler:
raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" )
super().__init__(lowerCamelCase_ , num_labels=lowerCamelCase_ , mode=self.mode , **lowerCamelCase_ )
use_task_specific_params(self.model , """summarization""" )
save_git_info(self.hparams.output_dir )
a_ : Any = Path(self.output_dir ) / """metrics.json"""
a_ : Tuple = Path(self.output_dir ) / """hparams.pkl"""
pickle_save(self.hparams , self.hparams_save_path )
a_ : Dict = 0
a_ : Optional[Any] = defaultdict(lowerCamelCase_ )
a_ : str = self.config.model_type
a_ : Union[str, Any] = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size
a_ : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
a_ : int = {
"""train""": self.hparams.n_train,
"""val""": self.hparams.n_val,
"""test""": self.hparams.n_test,
}
a_ : List[str] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
a_ : Optional[Any] = {
"""train""": self.hparams.max_target_length,
"""val""": self.hparams.val_max_target_length,
"""test""": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
a_ : int = get_git_info()["""repo_sha"""]
a_ : Union[str, Any] = hparams.num_workers
a_ : Tuple = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowerCamelCase_ ):
a_ : Dict = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
a_ : List[str] = self.decoder_start_token_id
a_ : Union[str, Any] = (
SeqaSeqDataset if hasattr(self.tokenizer , """prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset
)
a_ : Union[str, Any] = False
a_ : Any = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
a_ : Union[str, Any] = self.hparams.eval_max_gen_length
else:
a_ : Optional[Any] = self.model.config.max_length
a_ : Optional[int] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def UpperCAmelCase( self : List[Any] , lowerCamelCase_ : Dict[str, torch.Tensor] ):
a_ : Optional[Any] = {
k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items()
}
save_json(lowerCamelCase_ , Path(self.output_dir ) / """text_batch.json""" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / """tok_batch.json""" )
a_ : List[Any] = True
return readable_batch
def UpperCAmelCase( self : Union[str, Any] , lowerCamelCase_ : str , **lowerCamelCase_ : Tuple ):
return self.model(lowerCamelCase_ , **lowerCamelCase_ )
def UpperCAmelCase( self : Tuple , lowerCamelCase_ : List[int] ):
a_ : str = self.tokenizer.batch_decode(
lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ , clean_up_tokenization_spaces=lowerCamelCase_ )
return lmap(str.strip , lowerCamelCase_ )
def UpperCAmelCase( self : Optional[int] , lowerCamelCase_ : dict ):
a_ : int = self.tokenizer.pad_token_id
a_ , a_ : Optional[int] = batch["""input_ids"""], batch["""attention_mask"""]
a_ : Optional[Any] = batch["""labels"""]
if isinstance(self.model , lowerCamelCase_ ):
a_ : List[Any] = self.model._shift_right(lowerCamelCase_ )
else:
a_ : Dict = shift_tokens_right(lowerCamelCase_ , lowerCamelCase_ )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
a_ : int = decoder_input_ids
self.save_readable_batch(lowerCamelCase_ )
a_ : str = self(lowerCamelCase_ , attention_mask=lowerCamelCase_ , decoder_input_ids=lowerCamelCase_ , use_cache=lowerCamelCase_ )
a_ : int = outputs["""logits"""]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
a_ : Any = nn.CrossEntropyLoss(ignore_index=lowerCamelCase_ )
assert lm_logits.shape[-1] == self.vocab_size
a_ : Union[str, Any] = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
a_ : int = nn.functional.log_softmax(lowerCamelCase_ , dim=-1 )
a_ , a_ : str = label_smoothed_nll_loss(
lowerCamelCase_ , lowerCamelCase_ , self.hparams.label_smoothing , ignore_index=lowerCamelCase_ )
return (loss,)
@property
def UpperCAmelCase( self : Union[str, Any] ):
return self.tokenizer.pad_token_id
def UpperCAmelCase( self : str , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] ):
a_ : Dict = self._step(lowerCamelCase_ )
a_ : Optional[int] = dict(zip(self.loss_names , lowerCamelCase_ ) )
# tokens per batch
a_ : str = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum()
a_ : Optional[int] = batch["""input_ids"""].shape[0]
a_ : int = batch["""input_ids"""].eq(self.pad ).sum()
a_ : str = batch["""input_ids"""].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def UpperCAmelCase( self : int , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple ):
return self._generative_step(lowerCamelCase_ )
def UpperCAmelCase( self : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[int]="val" ):
self.step_count += 1
a_ : Optional[Any] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
a_ : Tuple = losses["""loss"""]
a_ : Optional[int] = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""]
}
a_ : List[Any] = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
a_ : torch.FloatTensor = torch.tensor(lowerCamelCase_ ).type_as(lowerCamelCase_ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(lowerCamelCase_ )
a_ : str = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
a_ : Union[str, Any] = self.step_count
self.metrics[prefix].append(lowerCamelCase_ ) # callback writes this to self.metrics_save_path
a_ : List[str] = flatten_list([x["""preds"""] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'''{prefix}_loss''': loss,
F'''{prefix}_{self.val_metric}''': metric_tensor,
}
def UpperCAmelCase( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any] ):
return calculate_rouge(lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase( self : int , lowerCamelCase_ : dict ):
a_ : Dict = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
a_ : Any = self.model.generate(
batch["""input_ids"""] , attention_mask=batch["""attention_mask"""] , use_cache=lowerCamelCase_ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
a_ : Optional[int] = (time.time() - ta) / batch["""input_ids"""].shape[0]
a_ : List[str] = self.ids_to_clean_text(lowerCamelCase_ )
a_ : List[str] = self.ids_to_clean_text(batch["""labels"""] )
a_ : Tuple = self._step(lowerCamelCase_ )
a_ : List[str] = dict(zip(self.loss_names , lowerCamelCase_ ) )
a_ : Dict = self.calc_generative_metrics(lowerCamelCase_ , lowerCamelCase_ )
a_ : Optional[int] = np.mean(lmap(lowerCamelCase_ , lowerCamelCase_ ) )
base_metrics.update(gen_time=lowerCamelCase_ , gen_len=lowerCamelCase_ , preds=lowerCamelCase_ , target=lowerCamelCase_ , **lowerCamelCase_ )
return base_metrics
def UpperCAmelCase( self : int , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] ):
return self._generative_step(lowerCamelCase_ )
def UpperCAmelCase( self : str , lowerCamelCase_ : Any ):
return self.validation_epoch_end(lowerCamelCase_ , prefix="""test""" )
def UpperCAmelCase( self : Any , lowerCamelCase_ : Any ):
a_ : List[str] = self.n_obs[type_path]
a_ : Dict = self.target_lens[type_path]
a_ : Optional[Any] = self.dataset_class(
self.tokenizer , type_path=lowerCamelCase_ , n_obs=lowerCamelCase_ , max_target_length=lowerCamelCase_ , **self.dataset_kwargs , )
return dataset
def UpperCAmelCase( self : Union[str, Any] , lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : bool = False ):
a_ : List[str] = self.get_dataset(lowerCamelCase_ )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
a_ : List[str] = dataset.make_sortish_sampler(lowerCamelCase_ , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowerCamelCase_ , batch_size=lowerCamelCase_ , collate_fn=dataset.collate_fn , shuffle=lowerCamelCase_ , num_workers=self.num_workers , sampler=lowerCamelCase_ , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
a_ : int = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowerCamelCase_ , batch_sampler=lowerCamelCase_ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
lowerCamelCase_ , batch_size=lowerCamelCase_ , collate_fn=dataset.collate_fn , shuffle=lowerCamelCase_ , num_workers=self.num_workers , sampler=lowerCamelCase_ , )
def UpperCAmelCase( self : Any ):
a_ : int = self.get_dataloader("""train""" , batch_size=self.hparams.train_batch_size , shuffle=lowerCamelCase_ )
return dataloader
def UpperCAmelCase( self : Dict ):
return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size )
def UpperCAmelCase( self : List[Any] ):
return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size )
@staticmethod
def UpperCAmelCase( lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] ):
BaseTransformer.add_model_specific_args(lowerCamelCase_ , lowerCamelCase_ )
add_generic_args(lowerCamelCase_ , lowerCamelCase_ )
parser.add_argument(
"""--max_source_length""" , default=1_0_2_4 , type=lowerCamelCase_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--max_target_length""" , default=5_6 , type=lowerCamelCase_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--val_max_target_length""" , default=1_4_2 , type=lowerCamelCase_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--test_max_target_length""" , default=1_4_2 , type=lowerCamelCase_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument("""--freeze_encoder""" , action="""store_true""" )
parser.add_argument("""--freeze_embeds""" , action="""store_true""" )
parser.add_argument("""--sortish_sampler""" , action="""store_true""" , default=lowerCamelCase_ )
parser.add_argument("""--overwrite_output_dir""" , action="""store_true""" , default=lowerCamelCase_ )
parser.add_argument("""--max_tokens_per_batch""" , type=lowerCamelCase_ , default=lowerCamelCase_ )
parser.add_argument("""--logger_name""" , type=lowerCamelCase_ , choices=["""default""", """wandb""", """wandb_shared"""] , default="""default""" )
parser.add_argument("""--n_train""" , type=lowerCamelCase_ , default=-1 , required=lowerCamelCase_ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_val""" , type=lowerCamelCase_ , default=5_0_0 , required=lowerCamelCase_ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_test""" , type=lowerCamelCase_ , default=-1 , required=lowerCamelCase_ , help="""# examples. -1 means use all.""" )
parser.add_argument(
"""--task""" , type=lowerCamelCase_ , default="""summarization""" , required=lowerCamelCase_ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--label_smoothing""" , type=lowerCamelCase_ , default=0.0 , required=lowerCamelCase_ )
parser.add_argument("""--src_lang""" , type=lowerCamelCase_ , default="""""" , required=lowerCamelCase_ )
parser.add_argument("""--tgt_lang""" , type=lowerCamelCase_ , default="""""" , required=lowerCamelCase_ )
parser.add_argument("""--eval_beams""" , type=lowerCamelCase_ , default=lowerCamelCase_ , required=lowerCamelCase_ )
parser.add_argument(
"""--val_metric""" , type=lowerCamelCase_ , default=lowerCamelCase_ , required=lowerCamelCase_ , choices=["""bleu""", """rouge2""", """loss""", None] )
parser.add_argument("""--eval_max_gen_length""" , type=lowerCamelCase_ , default=lowerCamelCase_ , help="""never generate more than n tokens""" )
parser.add_argument("""--save_top_k""" , type=lowerCamelCase_ , default=1 , required=lowerCamelCase_ , help="""How many checkpoints to save""" )
parser.add_argument(
"""--early_stopping_patience""" , type=lowerCamelCase_ , default=-1 , required=lowerCamelCase_ , help=(
"""-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"""
""" val_check_interval will effect it."""
) , )
return parser
class a__ ( lowerCAmelCase_ ):
    """Fine-tuning module for translation: uses BLEU as its validation metric.

    NOTE(review): ``__init__`` originally duplicated its parameter name into
    ``**kwargs`` (a SyntaxError) and referenced the undefined name
    ``hparams``; the two discarded assignments were ``self.dataset_kwargs``
    updates upstream — confirm against the upstream TranslationModule.
    """

    lowerCamelCase__: List[Any] = """translation"""
    lowerCamelCase__: int = ["""loss"""]
    lowerCamelCase__: List[str] = ["""bleu"""]
    lowerCamelCase__: Optional[Any] = """bleu"""

    def __init__( self : Optional[Any] , hparams : Optional[Any] , **kwargs : Tuple ):
        super().__init__(hparams , **kwargs )
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def UpperCAmelCase( self : Optional[Any] , preds : Tuple , target : int ):
        """Compute the generative metric (BLEU) for predictions vs. references."""
        return calculate_bleu(preds , target )
def _a ( args , model=None ):
    """Train (and optionally test) a summarization/translation module.

    NOTE(review): the original signature duplicated its parameter name (a
    SyntaxError) and the body referenced ``args``/``model`` directly while the
    locals had been collapsed to ``a_``; names below are restored from the
    body's own references and the upstream seq2seq ``main`` — confirm against
    it.
    """
    Path(args.output_dir ).mkdir(exist_ok=True )
    check_output_dir(args , expected_items=3 )
    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args )
        else:
            model: SummarizationModule = TranslationModule(args )
    dataset = Path(args.data_dir ).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir ).startswith("""/tmp""" )
        or str(args.output_dir ).startswith("""/var""" )
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("""WANDB_PROJECT""" , dataset )
        logger = WandbLogger(name=model.output_dir.name , project=project )
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name , project=F'''hf_{dataset}''' )
    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
    else:
        es_callback = False
    lower_is_better = args.val_metric == """loss"""
    trainer: pl.Trainer = generic_train(
        model , args , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
            args.output_dir , model.val_metric , args.save_top_k , lower_is_better ) , early_stopping_callback=es_callback , logger=logger , )
    pickle_save(model.hparams , model.output_dir / """hparams.pkl""" )
    if not args.do_predict:
        return model
    model.hparams.test_checkpoint = """"""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir , """*.ckpt""" ) , recursive=True ) )
    if checkpoints:
        # Test from the most recent checkpoint and resume the trainer from it.
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams )
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    # NOTE(review): the original referenced the undefined names ``parser``,
    # ``args`` and ``main`` (everything had been rebound to
    # ``__lowerCamelCase``); thread the single binding through explicitly and
    # call the training entry point ``_a`` defined above.
    __lowerCamelCase = argparse.ArgumentParser()
    __lowerCamelCase = pl.Trainer.add_argparse_args(__lowerCamelCase)
    __lowerCamelCase = SummarizationModule.add_model_specific_args(__lowerCamelCase, os.getcwd())
    __lowerCamelCase = __lowerCamelCase.parse_args()
    _a(__lowerCamelCase)
| 478 | 0 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}
# Backward-compatible alias: keep the original (renamed) binding pointing at
# the same dict so any external reference continues to work.
SCREAMING_SNAKE_CASE__ = _import_structure

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Register the torch-only symbols so the lazy module can expose them
    # (the original built this list but never added it to the structure).
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )
else:
    import sys

    # Install the lazy module in place of this one (the original assigned the
    # _LazyModule to a throwaway variable, which never activates lazy loading).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 79 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
    """Integration test reproducing the reference MT5 log-likelihood score."""

    @slow
    def snake_case_( self )-> Any:
        """Score a toy input/target pair with google/mt5-small and compare to the reference.

        NOTE(review): the original body referenced the undefined name
        ``_lowerCamelCase``; the data flow below (return_dict=True, move
        tensors to ``torch_device``) is restored from the upstream MT5
        integration test — confirm against it.
        """
        model = AutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''' , return_tensors='''pt''' ).input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''pt''' ).input_ids
        loss = model(input_ids.to(torch_device ) , labels=labels.to(torch_device ) ).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 161 | 0 |
"""simple docstring"""
import math
import qiskit
def UpperCAmelCase_ ( input_a : int = 1 , input_b : int = 1 , carry_in : int = 1 ):
    '''Build and simulate a two-qubit-output quantum full adder.

    Inputs may be 0, 1, or 2 (2 means "put the qubit in superposition via a
    Hadamard gate"). Returns the measurement-counts dict from 1000 shots.

    Raises TypeError for string inputs and ValueError for negative,
    non-integral, or >2 inputs.

    NOTE(review): the original duplicated all three parameter names (a
    SyntaxError) and referenced undefined locals (``entry``,
    ``quantum_circuit``); restored from the upstream quantum_full_adder.
    '''
    if (
        isinstance(input_a , str )
        or isinstance(input_b , str )
        or isinstance(carry_in , str )
    ):
        raise TypeError('inputs must be integers.' )
    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError('inputs must be positive.' )
    if (
        (math.floor(input_a ) != input_a)
        or (math.floor(input_b ) != input_b)
        or (math.floor(carry_in ) != carry_in)
    ):
        raise ValueError('inputs must be exact integers.' )
    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError('inputs must be less or equal to 2.' )
    # build registers
    qr = qiskit.QuantumRegister(4 , 'qr' )
    cr = qiskit.ClassicalRegister(2 , 'cr' )
    # list the entries
    entry = [input_a, input_b, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr , cr )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i )  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i )  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i )  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 )  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , cr )  # measure the last two qbits
    backend = qiskit.Aer.get_backend('aer_simulator' )
    job = qiskit.execute(quantum_circuit , backend , shots=1000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
    # NOTE(review): ``quantum_full_adder`` no longer exists in this module —
    # the function above is named ``UpperCAmelCase_`` after the rename.
    print(F"Total sum count for state is: {UpperCAmelCase_(1, 1, 1)}")
| 349 |
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
a_ = sys.version_info >= (3, 10)
# The rest of the module refers to this flag as ``is_python_no_less_than_3_10``
# (the mechanical rename dropped that binding); keep both names bound.
is_python_no_less_than_3_10 = a_
def UpperCAmelCase_ ( default=None , metadata=None ):
    '''Shortcut for a dataclass list-style field with a defaulted value.

    NOTE(review): the original duplicated its parameter name (a SyntaxError);
    the restored names match the keyword call sites (``default=``) below.
    The factory intentionally returns the *same* ``default`` object each call,
    mirroring the original closure.
    '''
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class A_:
    """Basic example: one required field each of int/float/str/bool.

    NOTE(review): the mechanical rename gave every field the same name
    ``a_``, so only the last annotation survives as a dataclass field;
    upstream these were ``foo``/``bar``/``baz``/``flag``.
    """
    a_ : int
    a_ : float
    a_ : str
    a_ : bool
@dataclass
class A_:
    """Example with defaults: an int defaulting to 42 and a documented str field.

    NOTE(review): both fields were renamed to ``a_`` — only the last survives.
    """
    a_ : int = 42
    a_ : str = field(default="""toto""" , metadata={"""help""": """help message"""} )
@dataclass
class A_:
    """Example with defaulted booleans (False / True / Optional None).

    NOTE(review): all fields were renamed to ``a_`` — only the last survives.
    """
    a_ : bool = False
    a_ : bool = True
    a_ : Optional[bool] = None
class A_(SCREAMING_SNAKE_CASE_ ):
    """Basic enum with two string members.

    NOTE(review): the base ``SCREAMING_SNAKE_CASE_`` is undefined here (it was
    ``Enum`` upstream), and both members were renamed to ``a_`` — an Enum
    rejects duplicate member names at class creation.
    """
    a_ : Tuple = """titi"""
    a_ : Tuple = """toto"""
class A_(SCREAMING_SNAKE_CASE_ ):
    """Mixed-type enum: two string members plus the int member 42.

    NOTE(review): the base ``SCREAMING_SNAKE_CASE_`` is undefined here (it was
    ``Enum`` upstream), and all members were renamed to ``a_``.
    """
    a_ : str = """titi"""
    a_ : Union[str, Any] = """toto"""
    a_ : Union[str, Any] = 42
@dataclass
class A_:
    """Example holding a BasicEnum field, converted in ``__post_init__`` upstream.

    NOTE(review): ``BasicEnum`` and ``self.foo`` no longer exist under those
    names after the rename — this method raises NameError/AttributeError.
    """
    a_ : BasicEnum = "toto"
    def _lowerCAmelCase ( self ):
        _lowerCamelCase : Tuple = BasicEnum(self.foo )
@dataclass
class A_:
    """Example holding a MixedTypeEnum field, converted in ``__post_init__`` upstream.

    NOTE(review): ``MixedTypeEnum`` and ``self.foo`` no longer exist under
    those names after the rename — this method raises at runtime.
    """
    a_ : MixedTypeEnum = "toto"
    def _lowerCAmelCase ( self ):
        _lowerCamelCase : Tuple = MixedTypeEnum(self.foo )
@dataclass
class A_:
    """Example with Optional scalar fields plus two list fields.

    NOTE(review): ``SCREAMING_SNAKE_CASE_`` (was ``None``/a sentinel upstream)
    and ``list_field`` (the helper is now named ``UpperCAmelCase_``) are
    undefined here after the rename; all fields share the name ``a_``.
    """
    a_ : Optional[int] = None
    a_ : Optional[float] = field(default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """help message"""} )
    a_ : Optional[str] = None
    a_ : Optional[List[str]] = list_field(default=[] )
    a_ : Optional[List[int]] = list_field(default=[] )
@dataclass
class A_:
    """Example with int/str/float list fields and their defaults.

    NOTE(review): ``list_field`` is undefined here (the helper was renamed to
    ``UpperCAmelCase_``), and all fields share the name ``a_``.
    """
    a_ : List[int] = list_field(default=[] )
    a_ : List[int] = list_field(default=[1, 2, 3] )
    a_ : List[str] = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
    a_ : List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class A_:
    """Example where every field is required (``field()`` with no default).

    NOTE(review): ``BasicEnum`` and ``self.required_enum`` no longer exist
    under those names after the rename; all fields share the name ``a_``.
    """
    a_ : List[int] = field()
    a_ : str = field()
    a_ : BasicEnum = field()
    def _lowerCAmelCase ( self ):
        _lowerCamelCase : List[str] = BasicEnum(self.required_enum )
@dataclass
class A_:
    """Example using string (forward-reference) annotations for every field.

    NOTE(review): ``list_field`` is undefined here after the rename, and all
    fields share the name ``a_`` — only the last annotation survives.
    """
    a_ : int
    a_ : "BasicEnum" = field()
    a_ : "Optional[bool]" = None
    a_ : "str" = field(default="""toto""" , metadata={"""help""": """help message"""} )
    a_ : "List[str]" = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
# PEP 604 (`X | None`) variants of the bool/optional examples, only defined on
# Python >= 3.10.
# NOTE(review): ``is_python_no_less_than_3_10`` is undefined after the rename
# (the flag is bound as ``a_`` above), ``list_field`` was renamed to
# ``UpperCAmelCase_``, and all fields share the name ``a_``.
if is_python_no_less_than_3_10:
    @dataclass
    class A_:
        """simple docstring"""
        a_ : bool = False
        a_ : bool = True
        a_ : bool | None = None
    @dataclass
    class A_:
        """simple docstring"""
        a_ : int | None = None
        a_ : float | None = field(default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """help message"""} )
        a_ : str | None = None
        a_ : list[str] | None = list_field(default=[] )
        a_ : list[int] | None = list_field(default=[] )
class A_(unittest.TestCase ):
    """Unit tests for HfArgumentParser parsing (args, dicts, json, yaml).

    NOTE(review): this class is badly damaged by a mechanical rename and is
    preserved verbatim here rather than rewritten: every test method is named
    ``_lowerCAmelCase`` (each definition shadows the previous one), the
    comparison helper is invoked as ``self.argparsersEqual`` which no longer
    exists, the helper's own signature duplicates the parameter name ``A``
    (a SyntaxError), an annotated tuple-unpacking target below is also a
    SyntaxError, and several locals (``a``/``b``, ``xx``/``yy``, ``example``,
    ``parser``, ``args``, ``enum_ex``) are referenced under their pre-rename
    names. Restore the upstream test names before relying on these tests.
    """
    def _lowerCAmelCase ( self , A , A ):
        # Helper: compare two argparse parsers action-by-action.
        self.assertEqual(len(a._actions ) , len(b._actions ) )
        for x, y in zip(a._actions , b._actions ):
            _lowerCamelCase : Dict = {k: v for k, v in vars(A ).items() if k != 'container'}
            _lowerCamelCase : Optional[Any] = {k: v for k, v in vars(A ).items() if k != 'container'}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get('choices' , A ) and yy.get('choices' , A ):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx['type'](A ) , yy['type'](A ) )
                del xx["type"], yy["type"]
            self.assertEqual(A , A )
    def _lowerCAmelCase ( self ):
        _lowerCamelCase : List[Any] = HfArgumentParser(A )
        _lowerCamelCase : Any = argparse.ArgumentParser()
        expected.add_argument('--foo' , type=A , required=A )
        expected.add_argument('--bar' , type=A , required=A )
        expected.add_argument('--baz' , type=A , required=A )
        expected.add_argument('--flag' , type=A , default=A , const=A , nargs='?' )
        self.argparsersEqual(A , A )
        _lowerCamelCase : Optional[Any] = ['--foo', '1', '--baz', 'quux', '--bar', '0.5']
        ((_lowerCamelCase) , ) : str = parser.parse_args_into_dataclasses(A , look_for_args_file=A )
        self.assertFalse(example.flag )
    def _lowerCAmelCase ( self ):
        _lowerCamelCase : List[str] = HfArgumentParser(A )
        _lowerCamelCase : int = argparse.ArgumentParser()
        expected.add_argument('--foo' , default=42 , type=A )
        expected.add_argument('--baz' , default='toto' , type=A , help='help message' )
        self.argparsersEqual(A , A )
    def _lowerCAmelCase ( self ):
        _lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
        expected.add_argument('--foo' , type=A , default=A , const=A , nargs='?' )
        expected.add_argument('--baz' , type=A , default=A , const=A , nargs='?' )
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument('--no_baz' , action='store_false' , default=A , dest='baz' )
        expected.add_argument('--opt' , type=A , default=A )
        _lowerCamelCase : Optional[Any] = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(A )
        for dataclass_type in dataclass_types:
            _lowerCamelCase : List[Any] = HfArgumentParser(A )
            self.argparsersEqual(A , A )
            _lowerCamelCase : Union[str, Any] = parser.parse_args([] )
            self.assertEqual(A , Namespace(foo=A , baz=A , opt=A ) )
            _lowerCamelCase : List[Any] = parser.parse_args(['--foo', '--no_baz'] )
            self.assertEqual(A , Namespace(foo=A , baz=A , opt=A ) )
            _lowerCamelCase : Union[str, Any] = parser.parse_args(['--foo', '--baz'] )
            self.assertEqual(A , Namespace(foo=A , baz=A , opt=A ) )
            _lowerCamelCase : Dict = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] )
            self.assertEqual(A , Namespace(foo=A , baz=A , opt=A ) )
            _lowerCamelCase : Any = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] )
            self.assertEqual(A , Namespace(foo=A , baz=A , opt=A ) )
    def _lowerCAmelCase ( self ):
        _lowerCamelCase : Optional[int] = HfArgumentParser(A )
        _lowerCamelCase : Tuple = argparse.ArgumentParser()
        expected.add_argument(
            '--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42] ) , )
        self.argparsersEqual(A , A )
        _lowerCamelCase : Optional[int] = parser.parse_args([] )
        self.assertEqual(args.foo , 'toto' )
        _lowerCamelCase : Optional[int] = parser.parse_args_into_dataclasses([] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
        _lowerCamelCase : List[Any] = parser.parse_args(['--foo', 'titi'] )
        self.assertEqual(args.foo , 'titi' )
        _lowerCamelCase : Optional[int] = parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
        _lowerCamelCase : Dict = parser.parse_args(['--foo', '42'] )
        self.assertEqual(args.foo , 42 )
        _lowerCamelCase : List[Any] = parser.parse_args_into_dataclasses(['--foo', '42'] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
    def _lowerCAmelCase ( self ):
        @dataclass
        class A_:
            """simple docstring"""
            a_ : Literal["titi", "toto", 42] = "toto"
        _lowerCamelCase : Optional[int] = HfArgumentParser(A )
        _lowerCamelCase : str = argparse.ArgumentParser()
        expected.add_argument(
            '--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42] ) , )
        self.argparsersEqual(A , A )
        _lowerCamelCase : Union[str, Any] = parser.parse_args([] )
        self.assertEqual(args.foo , 'toto' )
        _lowerCamelCase : List[Any] = parser.parse_args(['--foo', 'titi'] )
        self.assertEqual(args.foo , 'titi' )
        _lowerCamelCase : Dict = parser.parse_args(['--foo', '42'] )
        self.assertEqual(args.foo , 42 )
    def _lowerCAmelCase ( self ):
        _lowerCamelCase : Tuple = HfArgumentParser(A )
        _lowerCamelCase : int = argparse.ArgumentParser()
        expected.add_argument('--foo_int' , nargs='+' , default=[] , type=A )
        expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=A )
        expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=A )
        expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=A )
        self.argparsersEqual(A , A )
        _lowerCamelCase : List[str] = parser.parse_args([] )
        self.assertEqual(
            A , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , )
        _lowerCamelCase : Optional[int] = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() )
        self.assertEqual(A , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) )
    def _lowerCAmelCase ( self ):
        _lowerCamelCase : Tuple = argparse.ArgumentParser()
        expected.add_argument('--foo' , default=A , type=A )
        expected.add_argument('--bar' , default=A , type=A , help='help message' )
        expected.add_argument('--baz' , default=A , type=A )
        expected.add_argument('--ces' , nargs='+' , default=[] , type=A )
        expected.add_argument('--des' , nargs='+' , default=[] , type=A )
        _lowerCamelCase : Any = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(A )
        for dataclass_type in dataclass_types:
            _lowerCamelCase : Optional[Any] = HfArgumentParser(A )
            self.argparsersEqual(A , A )
            _lowerCamelCase : List[Any] = parser.parse_args([] )
            self.assertEqual(A , Namespace(foo=A , bar=A , baz=A , ces=[] , des=[] ) )
            _lowerCamelCase : Union[str, Any] = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() )
            self.assertEqual(A , Namespace(foo=12 , bar=3.1_4 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) )
    def _lowerCAmelCase ( self ):
        _lowerCamelCase : Union[str, Any] = HfArgumentParser(A )
        _lowerCamelCase : Tuple = argparse.ArgumentParser()
        expected.add_argument('--required_list' , nargs='+' , type=A , required=A )
        expected.add_argument('--required_str' , type=A , required=A )
        expected.add_argument(
            '--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=A , )
        self.argparsersEqual(A , A )
    def _lowerCAmelCase ( self ):
        _lowerCamelCase : Optional[int] = HfArgumentParser(A )
        _lowerCamelCase : List[str] = argparse.ArgumentParser()
        expected.add_argument('--foo' , type=A , required=A )
        expected.add_argument(
            '--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=A , )
        expected.add_argument('--opt' , type=A , default=A )
        expected.add_argument('--baz' , default='toto' , type=A , help='help message' )
        expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=A )
        self.argparsersEqual(A , A )
    def _lowerCAmelCase ( self ):
        _lowerCamelCase : List[str] = HfArgumentParser(A )
        _lowerCamelCase : str = {
            'foo': 12,
            'bar': 3.1_4,
            'baz': '42',
            'flag': True,
        }
        _lowerCamelCase : Any = parser.parse_dict(A )[0]
        _lowerCamelCase : Any = BasicExample(**A )
        self.assertEqual(A , A )
    def _lowerCAmelCase ( self ):
        _lowerCamelCase : int = HfArgumentParser(A )
        _lowerCamelCase : Optional[Any] = {
            'foo': 12,
            'bar': 3.1_4,
            'baz': '42',
            'flag': True,
            'extra': 42,
        }
        self.assertRaises(A , parser.parse_dict , A , allow_extra_keys=A )
    def _lowerCAmelCase ( self ):
        _lowerCamelCase : Dict = HfArgumentParser(A )
        _lowerCamelCase : Tuple = {
            'foo': 12,
            'bar': 3.1_4,
            'baz': '42',
            'flag': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            _lowerCamelCase : Union[str, Any] = os.path.join(A , 'temp_json' )
            os.mkdir(A )
            with open(temp_local_path + '.json' , 'w+' ) as f:
                json.dump(A , A )
            _lowerCamelCase : Optional[Any] = parser.parse_yaml_file(Path(temp_local_path + '.json' ) )[0]
            _lowerCamelCase : Union[str, Any] = BasicExample(**A )
            self.assertEqual(A , A )
    def _lowerCAmelCase ( self ):
        _lowerCamelCase : Optional[int] = HfArgumentParser(A )
        _lowerCamelCase : Dict = {
            'foo': 12,
            'bar': 3.1_4,
            'baz': '42',
            'flag': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            _lowerCamelCase : int = os.path.join(A , 'temp_yaml' )
            os.mkdir(A )
            with open(temp_local_path + '.yaml' , 'w+' ) as f:
                yaml.dump(A , A )
            _lowerCamelCase : Optional[int] = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0]
            _lowerCamelCase : Any = BasicExample(**A )
            self.assertEqual(A , A )
    def _lowerCAmelCase ( self ):
        _lowerCamelCase : int = HfArgumentParser(A )
        self.assertIsNotNone(A )
| 349 | 1 |
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class A__ ( unittest.TestCase ):
    """Integration test reproducing the reference Flax MT5 log-likelihood score."""

    @slow
    def _lowerCAmelCase ( self : Dict ) -> List[str]:
        """Score a toy input/target pair with google/mt5-small and compare to the reference.

        NOTE(review): the original body referenced the undefined name
        ``lowerCAmelCase__`` in every call; the data flow below is restored
        from the upstream Flax MT5 integration test. The trailing
        dataset-table residue fused onto the final line was removed.
        """
        model = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" )
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small" )
        input_ids = tokenizer("Hello there" , return_tensors="np" ).input_ids
        labels = tokenizer("Hi I am" , return_tensors="np" ).input_ids
        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id )
        logits = model(input_ids , decoder_input_ids=decoder_input_ids ).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1] ) ).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}
# Backward-compatible alias: the original (renamed) binding ``__a`` was being
# overwritten by every optional-backend list below, destroying the structure.
__a = _import_structure

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_mbart'] = ['MBartTokenizer']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_mbart_fast'] = ['MBartTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mbart'] = [
        'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MBartForCausalLM',
        'MBartForConditionalGeneration',
        'MBartForQuestionAnswering',
        'MBartForSequenceClassification',
        'MBartModel',
        'MBartPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_mbart'] = [
        'TFMBartForConditionalGeneration',
        'TFMBartModel',
        'TFMBartPreTrainedModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_mbart'] = [
        'FlaxMBartForConditionalGeneration',
        'FlaxMBartForQuestionAnswering',
        'FlaxMBartForSequenceClassification',
        'FlaxMBartModel',
        'FlaxMBartPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )
else:
    import sys

    # Install the lazy module in place of this one (the original assigned it
    # to a throwaway variable, which never activates lazy loading).
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket")
@patch("builtins.open")
def lowerCamelCase__ ( file , sock ):
    """Exercise ``send_file`` end to end with both ``open`` and ``socket`` mocked.

    ``@patch`` decorators inject mocks bottom-up, so the first parameter is
    the ``builtins.open`` mock and the second the ``socket.socket`` mock.

    NOTE(review): the original duplicated its parameter names (a SyntaxError)
    and dropped the mock wiring into throwaway locals; restored per the
    upstream send_file test — confirm against it.
    """
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    payload = iter([1, None])
    # First read returns truthy data, the second returns None to end the loop.
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(payload)
    # ===== invoke =====
    send_file(filename="mytext.txt" , testing=True)
    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()
    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()
    conn.send.assert_called_once()
    conn.close.assert_called_once()
    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class _UpperCamelCase ( BaseOutput ):
    """Output container for the temporal transformer's forward pass.

    NOTE(review): the original base ``__A`` was undefined; ``BaseOutput`` is
    the symbol this module imports for output containers. Upstream this class
    is ``TransformerTemporalModelOutput`` with a ``sample: torch.FloatTensor``
    field; the field annotation was lost in the rename, leaving the plain
    class attribute below — confirm against the original module.
    """
    lowerCamelCase__ =42
class _UpperCamelCase ( ModelMixin , ConfigMixin ):
    """Transformer that attends over the frame (time) axis of a video latent,
    independently per spatial location.

    NOTE(review): the original derived from ``__A , __A`` (the same undefined
    name twice — a TypeError) and duplicated every parameter name in both
    methods (SyntaxErrors); the mixins come from this module's own imports and
    parameter names are restored from the upstream diffusers
    ``TransformerTemporalModel`` — confirm against it.
    """

    @register_to_config
    def __init__( self : str , num_attention_heads : int = 16 , attention_head_dim : int = 88 , in_channels : Optional[int] = None , out_channels : Optional[int] = None , num_layers : int = 1 , dropout : float = 0.0 , norm_num_groups : int = 32 , cross_attention_dim : Optional[int] = None , attention_bias : bool = False , sample_size : Optional[int] = None , activation_fn : str = "geglu" , norm_elementwise_affine : bool = True , double_self_attention : bool = True , ) -> str:
        """Configure the norm, in/out projections and the transformer blocks."""
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        # GroupNorm over channels before projecting into the attention width.
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups , num_channels=in_channels , eps=1e-6 , affine=True )
        self.proj_in = nn.Linear(in_channels , inner_dim )
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim , num_attention_heads , attention_head_dim , dropout=dropout , cross_attention_dim=cross_attention_dim , activation_fn=activation_fn , attention_bias=attention_bias , double_self_attention=double_self_attention , norm_elementwise_affine=norm_elementwise_affine , )
                for d in range(num_layers )
            ] )
        self.proj_out = nn.Linear(inner_dim , in_channels )

    def __UpperCamelCase ( self : Any , hidden_states , encoder_hidden_states=None , timestep=None , class_labels=None , num_frames=1 , cross_attention_kwargs=None , return_dict : bool = True , ) -> Tuple:
        """Apply temporal self-attention.

        ``hidden_states`` is shaped (batch*frames, channel, height, width);
        attention runs over the frame dimension for each spatial location,
        and the input is added back as a residual at the end.
        """
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size , num_frames , channel , height , width )
        hidden_states = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
        hidden_states = self.norm(hidden_states )
        hidden_states = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , num_frames , channel )
        hidden_states = self.proj_in(hidden_states )
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states , encoder_hidden_states=encoder_hidden_states , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , class_labels=class_labels , )
        # 3. Output
        hidden_states = self.proj_out(hidden_states )
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size , height , width , num_frames , channel )
            .permute(0 , 3 , 4 , 1 , 2 )
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames , channel , height , width )
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        # NOTE(review): ``TransformerTemporalModelOutput`` is the upstream name
        # of the output dataclass defined above; the rename left it unbound in
        # this module.
        return TransformerTemporalModelOutput(sample=output )
'''simple docstring'''
# Digit-parity lookup tables used by the reversible-number counters below.
# NOTE(review): the functions reference these as EVEN_DIGITS/ODD_DIGITS, but
# the rename rebound both lists to ``UpperCamelCase__`` with annotations whose
# names (List/Optional) are not imported — both problems fixed here.
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
# Preserve the original binding (it ended up pointing at the odd list).
UpperCamelCase__ = ODD_DIGITS
def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : list[int] , _lowerCAmelCase : int ) -> int:
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
UpperCAmelCase : Any = 0
for digit in range(10 ):
UpperCAmelCase : Dict = digit
result += reversible_numbers(
0 , (remainder + 2 * digit) // 10 , _lowerCAmelCase , _lowerCAmelCase )
return result
UpperCAmelCase : Optional[Any] = 0
for digita in range(10 ):
UpperCAmelCase : List[Any] = digita
if (remainder + digita) % 2 == 0:
UpperCAmelCase : Any = ODD_DIGITS
else:
UpperCAmelCase : Optional[int] = EVEN_DIGITS
for digita in other_parity_digits:
UpperCAmelCase : Tuple = digita
result += reversible_numbers(
remaining_length - 2 , (remainder + digita + digita) // 10 , _lowerCAmelCase , _lowerCAmelCase , )
return result
def snake_case_ ( _lowerCAmelCase : int = 9 ) -> int:
UpperCAmelCase : int = 0
for length in range(1 , max_power + 1 ):
result += reversible_numbers(_lowerCAmelCase , 0 , [0] * length , _lowerCAmelCase )
return result
if __name__ == "__main__":
print(F"{solution() = }")
| 127 |
'''simple docstring'''
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHAaHash:
    """Pure-Python SHA-1 implementation (FIPS 180-4).

    Usage: ``SHAaHash(b"...").final_hash()`` returns the 40-hex-digit digest.
    """

    def __init__(self, data):
        # data: the bytes to hash; h: the five 32-bit initial state words.
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        """Left-rotate the 32-bit integer ``n`` by ``b`` bits."""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        """Return ``self.data`` padded to a multiple of 64 bytes.

        Appends the 0x80 marker, zero fill, and the 64-bit big-endian
        bit-length of the message, per the SHA-1 specification.
        """
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        """Split the padded message into 64-byte blocks."""
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        """Expand one 64-byte block into the 80-word message schedule."""
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        """Run the full compression pipeline and return the hex digest."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                # The round function f and constant k change every 20 rounds.
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            # Fold this block's result back into the running state.
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash() -> None:
    """Self-test: our SHA-1 must agree with hashlib's reference digest."""
    msg = b"Test String"
    assert SHAaHash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324
def main() -> None:
    """Hash a string or a file's contents from the CLI and print the digest."""
    parser = argparse.ArgumentParser(description='Process some strings or files')
    parser.add_argument(
        '--string',
        dest='input_string',
        default='Hello World!! Welcome to Cryptography',
        help='Hash the string',
    )
    parser.add_argument('--file', dest='input_file', help='Hash contents of a file')
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, 'rb') as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, 'utf-8')
    print(SHAaHash(hash_input).final_hash())


if __name__ == "__main__":
    main()

    # Run the doctests only when executed as a script, not on every import.
    import doctest

    doctest.testmod()
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _A(unittest.TestCase):
    """Smoke tests for the PyTorch benchmark utilities.

    Each test runs a tiny model through ``PyTorchBenchmark`` and checks that
    the produced result dictionaries are populated.
    """

    def check_results_dict_not_empty(self, results):
        """Assert every (batch_size, sequence_length) cell holds a result."""
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        """Inference benchmark with the model loaded from its name only."""
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        """Inference benchmark using only the pretrained base model."""
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        """Inference benchmark with TorchScript enabled."""
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            torchscript=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        """Inference benchmark in half precision (GPU only)."""
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            fp16=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        """Inference benchmark when the config declares no architectures."""
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        """Training benchmark with the model loaded from its name only."""
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        """Training benchmark in half precision (GPU only)."""
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            fp16=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        """Inference benchmark with an explicit config object."""
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        """Inference benchmark for an encoder-decoder model with a config."""
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        """Training benchmark with an explicit config object."""
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        """Training benchmark for an encoder-decoder model with a config."""
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        """The benchmark must write every requested CSV output file."""
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        """Line-by-line memory tracing must produce populated summaries and a log file."""
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
"""Tests for the PokerHand implementation (Project Euler problem 54)."""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
# Hands listed from weakest to strongest; ordering is relied on by the
# random-comparison generator below.
SORTED_HANDS = (
    "4S 3H 2C 7S 5H",
    "9D 8H 2C 6S 7H",
    "2D 6D 9D TH 7D",
    "TC 8C 2S JH 6C",
    "JH 8S TH AH QH",
    "TS KS 5S 9S AC",
    "KD 6S 9D TH AD",
    "KS 8D 4D 9S 4S",  # pair
    "8C 4S KH JS 4D",  # pair
    "QH 8H KD JH 8S",  # pair
    "KC 4H KS 2H 8D",  # pair
    "KD 4S KC 3H 8S",  # pair
    "AH 8S AS KC JH",  # pair
    "3H 4C 4H 3S 2H",  # 2 pairs
    "5S 5D 2C KH KH",  # 2 pairs
    "3C KH 5D 5S KH",  # 2 pairs
    "AS 3C KH AD KH",  # 2 pairs
    "7C 7S 3S 7H 5S",  # 3 of a kind
    "7C 7S KH 2H 7H",  # 3 of a kind
    "AC KH QH AH AS",  # 3 of a kind
    "2H 4D 3C AS 5S",  # straight (low ace)
    "3C 5C 4C 2C 6H",  # straight
    "6S 8S 7S 5H 9H",  # straight
    "JS QS 9H TS KH",  # straight
    "QC KH TS JS AH",  # straight (high ace)
    "8C 9C 5C 3C TC",  # flush
    "3S 8S 9S 5S KS",  # flush
    "4C 5C 9C 8C KC",  # flush
    "JH 8H AH KH QH",  # flush
    "3D 2H 3H 2C 2D",  # full house
    "2H 2C 3S 3H 3D",  # full house
    "KH KC 3S 3H 3D",  # full house
    "JC 6H JS JD JH",  # 4 of a kind
    "JC 7H JS JD JH",  # 4 of a kind
    "JC KH JS JD JH",  # 4 of a kind
    "2S AS 4S 5S 3S",  # straight flush (low ace)
    "2D 6D 3D 4D 5D",  # straight flush
    "5C 6C 3C 7C 4C",  # straight flush
    "JH 9H TH KH QH",  # straight flush
    "JH AH TH KH QH",  # royal flush (high ace straight flush)
)

# (player hand, opponent hand, expected result for the player)
TEST_COMPARE = (
    ("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
    ("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
    ("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
    ("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
    ("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
    ("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
    ("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
    ("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
    ("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
    ("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
    ("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
    ("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
    ("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
    ("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
    ("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
    ("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
    ("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
    ("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
    ("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
    ("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
    ("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
    ("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
    ("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
    ("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
    ("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
    ("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
    ("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
    ("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
    ("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
    ("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
    ("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
    ("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
    ("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)

TEST_FLUSH = (
    ("2H 3H 4H 5H 6H", True),
    ("AS AH 2H AD AC", False),
    ("2H 3H 5H 6H 7H", True),
    ("KS AS TS QS JS", True),
    ("8H 9H QS JS TH", False),
    ("AS 3S 4S 8S 2S", True),
)

TEST_STRAIGHT = (
    ("2H 3H 4H 5H 6H", True),
    ("AS AH 2H AD AC", False),
    ("2H 3H 5H 6H 7H", False),
    ("KS AS TS QS JS", True),
    ("8H 9H QS JS TH", True),
)

TEST_FIVE_HIGH_STRAIGHT = (
    ("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
    ("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
    ("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
    ("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)

TEST_KIND = (
    ("JH AH TH KH QH", 0),
    ("JH 9H TH KH QH", 0),
    ("JC KH JS JD JH", 7),
    ("KH KC 3S 3H 3D", 6),
    ("8C 9C 5C 3C TC", 0),
    ("JS QS 9H TS KH", 0),
    ("7C 7S KH 2H 7H", 3),
    ("3C KH 5D 5S KH", 2),
    ("QH 8H KD JH 8S", 1),
    ("2D 6D 9D TH 7D", 0),
)

TEST_TYPES = (
    ("JH AH TH KH QH", 23),
    ("JH 9H TH KH QH", 22),
    ("JC KH JS JD JH", 21),
    ("KH KC 3S 3H 3D", 20),
    ("8C 9C 5C 3C TC", 19),
    ("JS QS 9H TS KH", 18),
    ("7C 7S KH 2H 7H", 17),
    ("3C KH 5D 5S KH", 16),
    ("QH 8H KD JH 8S", 15),
    ("2D 6D 9D TH 7D", 14),
)


def generate_random_hand():
    """Pick two random hands from SORTED_HANDS and derive the expected outcome
    from their positions in the sorted tuple."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    """Yield ``number_of_hands`` random (hand, other, expected) triples."""
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected) -> None:
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected) -> None:
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values) -> None:
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected) -> None:
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected) -> None:
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected) -> None:
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected) -> None:
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted() -> None:
    """Sorting shuffled PokerHands must restore the canonical order."""
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight() -> None:
    """The low-ace straight must sort below the six-high straight."""
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight() -> None:
    """Repeated calls must not mutate the cached card values."""
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project() -> None:
    """Project Euler 54: player 1 wins 376 of the supplied 1000 hands."""
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
'''simple docstring'''
from collections import deque
from .hash_table import HashTable
class UpperCamelCase__(HashTable):
    """Hash table whose buckets are deques, so colliding keys chain values.

    Inherits the table layout (``values``, ``_keys``, ``charge_factor``,
    ``size_table``) from ``HashTable``.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        """Prepend ``data`` to the deque stored at ``key``, creating it on demand."""
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        """Return the average remaining capacity per slot, scaled by charge_factor."""
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        """Keep using ``key`` while its deque has room or empty slots remain.

        NOTE(review): the second predicate is assumed to check for remaining
        empty (None) slots — confirm against the HashTable base class.
        """
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
"""simple docstring"""
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    """Build HF encoder/decoder configs from an original Donut model's config.

    Args:
        model: the original ``DonutModel`` whose hyper-parameters are mirrored.

    Returns:
        ``(encoder_config, decoder_config)`` — a ``DonutSwinConfig`` and an
        ``MBartConfig`` matching the original architecture.
    """
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config
def rename_key(name):
    """Translate an original Donut state-dict key into the HF naming scheme.

    Applies the encoder/decoder prefix rewrites first, then the
    attention/MLP/norm renames that only apply inside the encoder.
    """
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            # HF nests the Swin layers one level deeper: encoder.encoder.layers...
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")

        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"

    return name
def convert_state_dict(orig_state_dict, model):
    """Rewrite an original Donut state dict in place for the HF model.

    Fused qkv projections are split into separate query/key/value tensors
    (sized from the target model's attention heads); attention-mask buffers
    and the final encoder norm are dropped; everything else is renamed via
    ``rename_key``.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            # Per-head hidden size of the matching HF attention module.
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert an original Donut checkpoint to the HF VisionEncoderDecoder format.

    Loads the original model, builds the equivalent HF model, copies the
    weights, verifies intermediate activations agree on a sample document,
    then optionally saves and/or pushes the converted model and processor.
    """
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        # NOTE(review): prompt is missing the leading "<" in the original
        # checkpoint metadata as well — kept as-is deliberately.
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    # verify patch embeddings
    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f'Saving model and processor to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='naver-clova-ix/donut-base-finetuned-docvqa',
        required=False,
        type=str,
        help='Name of the original model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        required=False,
        type=str,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
        help='Whether or not to push the converted model and processor to the 🤗 hub.',
    )

    args = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import argparse
import os
import re
SCREAMING_SNAKE_CASE_ = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
SCREAMING_SNAKE_CASE_ = re.compile(R'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
SCREAMING_SNAKE_CASE_ = re.compile(R'\s*\(\s*"(\S[^"]+)"')
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: List[Any] , lowerCAmelCase: bool = False ) -> int:
with open(lowerCAmelCase , "r" , encoding="utf-8" ) as f:
_UpperCAmelCase : Dict = f.read()
_UpperCAmelCase : int = content.split("\n" )
_UpperCAmelCase : Optional[Any] = []
_UpperCAmelCase : Optional[Any] = 0
while line_idx < len(lowerCAmelCase ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
_UpperCAmelCase : int = len(re.search(R"^(\s*)\S" , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
_UpperCAmelCase : str = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
_UpperCAmelCase : List[str] = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
_UpperCAmelCase : Any = sorted(lowerCAmelCase , key=lambda lowerCAmelCase : _re_identifier.search(lowerCAmelCase ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(lowerCAmelCase , "w" , encoding="utf-8" ) as f:
f.write("\n".join(lowerCAmelCase ) )
elif "\n".join(lowerCAmelCase ) != content:
return True
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: bool = False ) -> Tuple:
    # Intended behaviour: sort every ``.py`` auto-mapping file under the auto-module
    # directory and, in check-only mode, raise when any file still needs sorting.
    # NOTE(review): obfuscation damage — ``os.listdir``/``os.path.join`` are applied to the
    # boolean flag instead of the module path, ``sort_auto_mapping``/``fnames``/``overwrite``
    # are undefined names, and both list results are rebound to ``_UpperCAmelCase``.
    # Reconstruct from the original `transformers` utility before relying on this function.
    _UpperCAmelCase : Tuple = [os.path.join(lowerCAmelCase , lowerCAmelCase ) for f in os.listdir(lowerCAmelCase ) if f.endswith(".py" )]
    _UpperCAmelCase : List[str] = [sort_auto_mapping(lowerCAmelCase , overwrite=lowerCAmelCase ) for fname in fnames]
    if not overwrite and any(lowerCAmelCase ):
        _UpperCAmelCase : str = [f for f, d in zip(lowerCAmelCase , lowerCAmelCase ) if d]
        raise ValueError(
            F'The following files have auto mappings that need sorting: {", ".join(lowerCAmelCase )}. Run `make style` to fix'
            " this." )
# CLI entry point: ``--check_only`` reports unsorted mappings instead of rewriting them.
if __name__ == "__main__":
    # NOTE(review): the parser/namespace are bound to ``SCREAMING_SNAKE_CASE_`` but read
    # back as ``parser``/``args``, and ``sort_all_auto_mappings`` is not defined under that
    # name in this module — confirm against the original script.
    SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    SCREAMING_SNAKE_CASE_ = parser.parse_args()
    sort_all_auto_mappings(not args.check_only)
| 467 |
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: str , lowerCAmelCase: str ) -> int:
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError("String lengths must match!" )
_UpperCAmelCase : List[Any] = 0
for chara, chara in zip(lowerCAmelCase , lowerCAmelCase ):
if chara != chara:
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 467 | 1 |
from jiwer import compute_measures
import datasets
__magic_name__ : Tuple = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
__magic_name__ : Optional[int] = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
__magic_name__ : Dict = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
    """Word error rate (WER) metric computed with ``jiwer.compute_measures``."""
    # NOTE(review): obfuscation damage — the decorator reads ``_DESCRIPTION``/
    # ``_KWARGS_DESCRIPTION`` (and the info method reads ``_CITATION``) but the module
    # constants above are all bound to ``__magic_name__``; both methods share the name
    # ``_SCREAMING_SNAKE_CASE`` (the second shadows the first); the compute method declares
    # three parameters with the same name (a SyntaxError) and reads ``concatenate_texts``/
    # ``incorrect``/``total``/``measures`` which are never bound. Confirm against the
    # original `evaluate`/`datasets` WER metric before use.
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Declare the metric's description, citation and string input features."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
            {
                'predictions': datasets.Value('string' , id='sequence' ),
                'references': datasets.Value('string' , id='sequence' ),
            } ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
                'https://en.wikipedia.org/wiki/Word_error_rate',
            ] , )
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : str=None , _SCREAMING_SNAKE_CASE : Union[str, Any]=None , _SCREAMING_SNAKE_CASE : List[str]=False ):
        """Compute WER either on concatenated texts or accumulated pair by pair."""
        if concatenate_texts:
            return compute_measures(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )["wer"]
        else:
            # Accumulate error and reference-token counts across all pairs so the final
            # ratio is a corpus-level WER, not an average of per-sentence WERs.
            UpperCamelCase = 0
            UpperCamelCase = 0
            for prediction, reference in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
                UpperCamelCase = compute_measures(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
| 280 |
import numpy as np
import datasets
A__ : int = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
A__ : Optional[int] = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
A__ : Optional[int] = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
    """Mahalanobis distance of datapoints ``X`` to a reference distribution."""
    # NOTE(review): obfuscation damage — the decorator reads ``_DESCRIPTION``/
    # ``_KWARGS_DESCRIPTION`` but the module constants above are all bound to ``A__``;
    # both methods share the name ``lowercase__`` (the second shadows the first); the
    # compute method declares two parameters named ``lowerCamelCase`` (a SyntaxError), its
    # body reads ``X``/``reference_distribution``/``X_minus_mu``/``mahal_dist`` which are
    # never bound, and every intermediate is rebound to ``lowercase__``. Confirm against
    # the original `datasets` mahalanobis metric before use.
    def lowercase__ ( self : str ):
        """Declare the metric's input features (``X``: sequences of floats)."""
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
            {
                '''X''': datasets.Sequence(datasets.Value('''float''', id='''sequence''' ), id='''X''' ),
            } ), )
    def lowercase__ ( self : Dict, lowerCamelCase : int, lowerCamelCase : Optional[int] ):
        """Compute the Mahalanobis distance of each row of X to the reference distribution."""
        # convert to numpy arrays
        lowercase__ = np.array(lowerCamelCase )
        lowercase__ = np.array(lowerCamelCase )
        # Assert that arrays are 2D
        if len(X.shape ) != 2:
            raise ValueError('''Expected `X` to be a 2D vector''' )
        if len(reference_distribution.shape ) != 2:
            raise ValueError('''Expected `reference_distribution` to be a 2D vector''' )
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                '''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' )
        # Get mahalanobis distance for each prediction
        # NOTE(review): ``np.mean`` is called without an axis here — presumably it should be
        # the mean of the reference distribution over axis 0; confirm against the original.
        lowercase__ = X - np.mean(lowerCamelCase )
        lowercase__ = np.cov(reference_distribution.T )
        try:
            lowercase__ = np.linalg.inv(lowerCamelCase )
        except np.linalg.LinAlgError:
            # Fall back to the pseudo-inverse when the covariance matrix is singular.
            lowercase__ = np.linalg.pinv(lowerCamelCase )
        lowercase__ = np.dot(lowerCamelCase, lowerCamelCase )
        lowercase__ = np.dot(lowerCamelCase, X_minus_mu.T ).diagonal()
        return {"mahalanobis": mahal_dist}
| 183 | 0 |
'''simple docstring'''
import functools
def _lowerCAmelCase ( lowercase : list[int] , lowercase : list[int] ) ->int:
"""simple docstring"""
if not isinstance(lowercase , lowercase ) or not all(isinstance(lowercase , lowercase ) for day in days ):
raise ValueError('''The parameter days should be a list of integers''' )
if len(lowercase ) != 3 or not all(isinstance(lowercase , lowercase ) for cost in costs ):
raise ValueError('''The parameter costs should be a list of three integers''' )
if len(lowercase ) == 0:
return 0
if min(lowercase ) <= 0:
raise ValueError('''All days elements should be greater than 0''' )
if max(lowercase ) >= 3_6_6:
raise ValueError('''All days elements should be less than 366''' )
lowercase__ = set(lowercase )
@functools.cache
def dynamic_programming(lowercase : int ) -> int:
if index > 3_6_5:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 3_0 ) , )
return dynamic_programming(1 )
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 721 |
'''simple docstring'''
_lowerCAmelCase = "Input must be a string of 8 numbers plus letter"
_lowerCAmelCase = "TRWAGMYFPDXBNJZSQVHLCKE"
def _lowerCAmelCase ( lowercase : str ) ->bool:
"""simple docstring"""
if not isinstance(lowercase , lowercase ):
lowercase__ = F'''Expected string as input, found {type(lowercase ).__name__}'''
raise TypeError(lowercase )
lowercase__ = spanish_id.replace('''-''' , '''''' ).upper()
if len(lowercase ) != 9:
raise ValueError(lowercase )
try:
lowercase__ = int(spanish_id_clean[0:8] )
lowercase__ = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(lowercase ) from ex
if letter.isdigit():
raise ValueError(lowercase )
return letter == LOOKUP_LETTERS[number % 2_3]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318 | 0 |
"""simple docstring"""
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
if density <= 0:
raise ValueError('''Impossible fluid density''' )
if bulk_modulus <= 0:
raise ValueError('''Impossible bulk modulus''' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 76 |
"""simple docstring"""
# Lint as: python3
import itertools
import os
import re
a_ = re.compile(r"""([A-Z]+)([A-Z][a-z])""")
a_ = re.compile(r"""([a-z\d])([A-Z])""")
a_ = re.compile(r"""(?<!_)_(?!_)""")
a_ = re.compile(r"""(_{2,})""")
a_ = r"""^\w+(\.\w+)*$"""
a_ = r"""<>:/\|?*"""
def UpperCAmelCase_ ( __a : Optional[int] ):
    """Convert a CamelCase dataset name to snake_case."""
    # NOTE(review): obfuscation damage — the second substitution is applied to the original
    # ``__a`` (discarding the first result), both results are rebound to ``_lowerCamelCase``,
    # and the return reads an undefined ``name``. The original chained the two substitutions
    # and lowered the final result; reconstruct from `datasets.naming`.
    _lowerCamelCase : str = _uppercase_uppercase_re.sub(r'\1_\2' , __a )
    _lowerCamelCase : Tuple = _lowercase_uppercase_re.sub(r'\1_\2' , __a )
    return name.lower()
def UpperCAmelCase_ ( __a : Optional[int] ):
    """Convert a snake_case dataset name to CamelCase."""
    # NOTE(review): obfuscation damage — both results are rebound to ``_lowerCamelCase``
    # while the body reads back ``name``/``__a`` inconsistently. The original split on
    # single underscores, then on underscore runs, and capitalized the non-empty pieces.
    _lowerCamelCase : Dict = _single_underscore_re.split(__a )
    _lowerCamelCase : Tuple = [_multiple_underscores_re.split(__a ) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(__a ) if n != '' )
def UpperCAmelCase_ ( __a : List[Any] ):
    """Return the snake_case file-name prefix for a dataset name (paths rejected)."""
    # NOTE(review): obfuscation damage — the guard reads ``name`` (undefined; presumably the
    # parameter) and ``camelcase_to_snakecase`` is not defined under that name in this module
    # (all helpers here are bound to ``UpperCAmelCase_``).
    if os.path.basename(__a ) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}" )
    return camelcase_to_snakecase(__a )
def UpperCAmelCase_ ( __a : Union[str, Any] , __a : Optional[int] ):
    """Return the '{name_prefix}-{split}' file-name prefix for a dataset split."""
    # NOTE(review): obfuscation damage — both parameters are declared with the same name
    # ``__a`` (a SyntaxError), the body reads undefined ``name``/``split``, and
    # ``filename_prefix_for_name`` is not defined under that name in this module.
    if os.path.basename(__a ) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}" )
    if not re.match(_split_re , __a ):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'." )
    return f"{filename_prefix_for_name(__a )}-{split}"
def UpperCAmelCase_ ( __a : Any , __a : Union[str, Any] , __a : List[Any] , __a : List[str]=None ):
    """Return a glob pattern ('{prefix}*') matching the data files of one split."""
    # NOTE(review): obfuscation damage — all four parameters share the name ``__a`` (a
    # SyntaxError); the body reads undefined ``filetype_suffix``/``prefix``/``filepath`` and
    # ``filename_prefix_for_split`` is not defined under that name in this module.
    _lowerCamelCase : List[Any] = filename_prefix_for_split(__a , __a )
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    _lowerCamelCase : List[str] = os.path.join(__a , __a )
    return f"{filepath}*"
def UpperCAmelCase_ ( __a : str , __a : List[Any] , __a : List[str] , __a : Tuple=None , __a : Tuple=None ):
    """Return the concrete data file name(s) for a split, sharded when lengths are given."""
    # NOTE(review): obfuscation damage — all five parameters share the name ``__a`` (a
    # SyntaxError); the body reads undefined ``shard_lengths``/``filetype_suffix``/
    # ``prefix``/``filenames``/``filename`` and rebinds every result to ``_lowerCamelCase``;
    # ``filename_prefix_for_split`` is not defined under that name in this module.
    # Reconstruct from the original `datasets.naming` module before use.
    _lowerCamelCase : Tuple = filename_prefix_for_split(__a , __a )
    _lowerCamelCase : List[str] = os.path.join(__a , __a )
    if shard_lengths:
        _lowerCamelCase : Union[str, Any] = len(__a )
        # Shards are numbered '{prefix}-00000-of-000NN' style.
        _lowerCamelCase : str = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(__a )]
        if filetype_suffix:
            _lowerCamelCase : int = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        _lowerCamelCase : int = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
| 437 | 0 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class A__ :
    """Builds tiny EfficientFormer configs and random inputs for the TF model tests."""
    # NOTE(review): obfuscation damage — ``__init__`` declares every parameter as
    # ``lowerCamelCase`` (duplicate argument names are a SyntaxError) while the attribute
    # assignments read ``parent``/``batch_size``/... which are never bound; all other
    # methods share the name ``lowercase`` (each shadows the previous); result variables
    # are rebound to ``__magic_name__`` but read back under their original names; and
    # ``prepare_config_and_inputs_for_common`` reads an undefined ``config_and_inputs``.
    # Confirm against the original transformers test module before relying on this class.
    def __init__( self , lowerCamelCase , lowerCamelCase = 13 , lowerCamelCase = 64 , lowerCamelCase = 2 , lowerCamelCase = 3 , lowerCamelCase = 3 , lowerCamelCase = True , lowerCamelCase = True , lowerCamelCase = 128 , lowerCamelCase=[16, 32, 64, 128] , lowerCamelCase = 7 , lowerCamelCase = 4 , lowerCamelCase = 37 , lowerCamelCase = "gelu" , lowerCamelCase = 0.1 , lowerCamelCase = 0.1 , lowerCamelCase = 10 , lowerCamelCase = 0.0_2 , lowerCamelCase = 2 , lowerCamelCase = 1 , lowerCamelCase = 128 , lowerCamelCase = [2, 2, 2, 2] , lowerCamelCase = 2 , lowerCamelCase = 2 , ) -> int:
        """Store the hyper-parameters used to build small test configs/inputs."""
        __magic_name__ : List[Any] = parent
        __magic_name__ : Dict = batch_size
        __magic_name__ : Tuple = image_size
        __magic_name__ : Any = patch_size
        __magic_name__ : Dict = num_channels
        __magic_name__ : Tuple = is_training
        __magic_name__ : Dict = use_labels
        __magic_name__ : List[str] = hidden_size
        __magic_name__ : Dict = num_hidden_layers
        __magic_name__ : Optional[Any] = num_attention_heads
        __magic_name__ : int = intermediate_size
        __magic_name__ : Union[str, Any] = hidden_act
        __magic_name__ : List[str] = hidden_dropout_prob
        __magic_name__ : Optional[Any] = attention_probs_dropout_prob
        __magic_name__ : List[str] = type_sequence_label_size
        __magic_name__ : List[Any] = initializer_range
        __magic_name__ : List[str] = encoder_stride
        __magic_name__ : List[str] = num_attention_outputs
        __magic_name__ : str = embed_dim
        __magic_name__ : Optional[Any] = embed_dim + 1
        __magic_name__ : Union[str, Any] = resolution
        __magic_name__ : Dict = depths
        __magic_name__ : Dict = hidden_sizes
        __magic_name__ : Any = dim
        __magic_name__ : Any = mlp_expansion_ratio
    def lowercase ( self ) -> Tuple:
        """Create random pixel values (and optional labels) plus a config."""
        __magic_name__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __magic_name__ : str = None
        if self.use_labels:
            __magic_name__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        __magic_name__ : List[Any] = self.get_config()
        return config, pixel_values, labels
    def lowercase ( self ) -> List[Any]:
        """Build an EfficientFormerConfig from the stored hyper-parameters."""
        return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
    def lowercase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Dict:
        """Run the base model and check the last_hidden_state shape."""
        __magic_name__ : Tuple = TFEfficientFormerModel(config=lowerCamelCase )
        __magic_name__ : List[Any] = model(lowerCamelCase , training=lowerCamelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def lowercase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Union[str, Any]:
        """Run the classification model (RGB and greyscale) and check the logits shape."""
        __magic_name__ : Dict = self.type_sequence_label_size
        __magic_name__ : Tuple = TFEfficientFormerForImageClassification(lowerCamelCase )
        __magic_name__ : int = model(lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        __magic_name__ : Tuple = 1
        __magic_name__ : int = TFEfficientFormerForImageClassification(lowerCamelCase )
        __magic_name__ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __magic_name__ : Union[str, Any] = model(lowerCamelCase , labels=lowerCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def lowercase ( self ) -> Optional[int]:
        """Return (config, inputs_dict) for the shared common tests."""
        __magic_name__ : List[str] = self.prepare_config_and_inputs()
        __magic_name__ : Dict = config_and_inputs
        __magic_name__ : int = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """TF EfficientFormer test suite: common model checks, hidden states, attentions."""
    # NOTE(review): obfuscation damage — both mixin bases are the undefined name
    # ``__SCREAMING_SNAKE_CASE`` (presumably TFModelTesterMixin and PipelineTesterMixin);
    # the setUp-style method instantiates an undefined ``TFEfficientFormerModelTester``;
    # several methods and the inner helper declare duplicate ``lowerCamelCase`` parameters
    # (a SyntaxError) or read ``lowerCamelCase`` where no such name exists; and
    # ``self.asseretIsInstance`` below is a typo for ``assertIsInstance``. All test methods
    # share the name ``lowercase`` (each shadows the previous). Confirm against the
    # original transformers test module before relying on this class.
    lowerCamelCase__ : str =(
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    lowerCamelCase__ : Tuple =(
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )
    lowerCamelCase__ : Dict =False
    lowerCamelCase__ : Optional[Any] =False
    lowerCamelCase__ : List[Any] =False
    lowerCamelCase__ : str =False
    lowerCamelCase__ : Optional[Any] =False
    def lowercase ( self ) -> Union[str, Any]:
        """Build the model tester and config tester used by the common tests."""
        __magic_name__ : Tuple = TFEfficientFormerModelTester(self )
        __magic_name__ : Optional[Any] = ConfigTester(
            self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase , hidden_size=37 )
    def lowercase ( self ) -> int:
        """Run the shared ConfigTester checks."""
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''EfficientFormer does not use inputs_embeds''' )
    def lowercase ( self ) -> Any:
        """Skipped: EfficientFormer has no inputs_embeds."""
        pass
    @unittest.skip(reason='''EfficientFormer does not support input and output embeddings''' )
    def lowercase ( self ) -> Optional[int]:
        """Skipped: EfficientFormer has no input/output embeddings."""
        pass
    def lowercase ( self ) -> str:
        """Check that each model's call signature starts with ``pixel_values``."""
        __magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __magic_name__ : Optional[int] = model_class(lowerCamelCase )
            __magic_name__ : str = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __magic_name__ : Dict = [*signature.parameters.keys()]
            __magic_name__ : str = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , lowerCamelCase )
    def lowercase ( self ) -> Optional[Any]:
        """Check hidden-state outputs: layer count and trailing dimensions."""
        def check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
            __magic_name__ : Optional[Any] = model_class(lowerCamelCase )
            __magic_name__ : Dict = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) , training=lowerCamelCase )
            __magic_name__ : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            __magic_name__ : Any = getattr(
                self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
            self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
            if hasattr(self.model_tester , '''encoder_seq_length''' ):
                __magic_name__ : List[str] = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester , '''chunk_length''' ) and self.model_tester.chunk_length > 1:
                    __magic_name__ : List[str] = seq_length * self.model_tester.chunk_length
            else:
                __magic_name__ : List[Any] = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
            if config.is_encoder_decoder:
                __magic_name__ : Optional[int] = outputs.decoder_hidden_states
                # NOTE(review): typo — should be ``self.assertIsInstance``.
                self.asseretIsInstance(lowerCamelCase , (list, tuple) )
                self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
                __magic_name__ : Optional[int] = getattr(self.model_tester , '''seq_length''' , lowerCamelCase )
                __magic_name__ : int = getattr(self.model_tester , '''decoder_seq_length''' , lowerCamelCase )
                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
        __magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __magic_name__ : Optional[int] = True
            check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __magic_name__ : List[Any] = True
            check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
    def lowercase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=False ) -> Any:
        """Drop labels for the teacher-distillation classification head."""
        __magic_name__ : List[Any] = super()._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def lowercase ( self ) -> int:
        """Run the shared create-and-check-model test."""
        __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase )
    @unittest.skip(reason='''EfficientFormer does not implement masked image modeling yet''' )
    def lowercase ( self ) -> Optional[int]:
        """Skipped: masked image modeling is not implemented for EfficientFormer."""
        __magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase )
    def lowercase ( self ) -> Any:
        """Run the image-classification head test."""
        __magic_name__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
    @slow
    def lowercase ( self ) -> Optional[int]:
        """Load the first pretrained checkpoint and assert it instantiates."""
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __magic_name__ : List[Any] = TFEfficientFormerModel.from_pretrained(lowerCamelCase )
            self.assertIsNotNone(lowerCamelCase )
    def lowercase ( self ) -> List[Any]:
        """Check attention outputs: count and trailing dimensions."""
        __magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        __magic_name__ : Optional[Any] = True
        __magic_name__ : Optional[int] = getattr(self.model_tester , '''seq_length''' , lowerCamelCase )
        __magic_name__ : int = getattr(self.model_tester , '''encoder_seq_length''' , lowerCamelCase )
        __magic_name__ : Tuple = getattr(self.model_tester , '''key_length''' , lowerCamelCase )
        __magic_name__ : List[Any] = getattr(self.model_tester , '''chunk_length''' , lowerCamelCase )
        if chunk_length is not None and hasattr(self.model_tester , '''num_hashes''' ):
            __magic_name__ : int = encoder_seq_length * self.model_tester.num_hashes
        for model_class in self.all_model_classes:
            __magic_name__ : Dict = True
            __magic_name__ : List[Any] = False
            __magic_name__ : Tuple = True
            __magic_name__ : str = model_class(lowerCamelCase )
            __magic_name__ : List[Any] = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) , training=lowerCamelCase )
            __magic_name__ : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_attention_outputs )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            __magic_name__ : Optional[Any] = True
            __magic_name__ : int = model_class(lowerCamelCase )
            __magic_name__ : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) , training=lowerCamelCase )
            __magic_name__ : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_attention_outputs )
            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
    def lowercase ( self ) -> Union[str, Any]:
        """Build each model from its input signature with flexible shapes and call it."""
        __magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # Prepare our model
            __magic_name__ : int = model_class(lowerCamelCase )
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            __magic_name__ : Tuple = {
                key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowerCamelCase )
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            __magic_name__ : List[str] = model(lowerCamelCase )
            self.assertTrue(outputs_dict is not None )
def lowerCAmelCase ( ) ->Optional[int]:
    """Load the standard COCO test image used by the integration tests below."""
    # NOTE(review): the image is bound to ``__magic_name__`` but returned as ``image`` —
    # one of the two names is wrong (obfuscation damage).
    __magic_name__ : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
@require_vision
class A__ ( unittest.TestCase ):
    """Slow integration tests: run pretrained EfficientFormer-L1 on a COCO image."""
    # NOTE(review): obfuscation damage — method bodies read ``lowerCamelCase`` where no
    # such parameter exists (checkpoint names, assertion arguments), results are rebound to
    # ``__magic_name__`` but read back as ``model``/``image_processor``/``outputs``, and
    # both @slow test methods share the name ``lowercase``. Confirm against the original
    # transformers test module.
    @cached_property
    def lowercase ( self ) -> int:
        """Image processor for the checkpoint, or None when vision deps are missing."""
        return (
            EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''' )
            if is_vision_available()
            else None
        )
    @slow
    def lowercase ( self ) -> Dict:
        """Check logits shape/values of the plain image-classification head."""
        __magic_name__ : str = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''' )
        __magic_name__ : List[Any] = self.default_image_processor
        __magic_name__ : Optional[int] = prepare_img()
        __magic_name__ : List[str] = image_processor(images=lowerCamelCase , return_tensors='''tf''' )
        # forward pass
        __magic_name__ : List[str] = model(**lowerCamelCase , training=lowerCamelCase )
        # verify the logits
        __magic_name__ : Optional[Any] = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , lowerCamelCase )
        __magic_name__ : List[Any] = tf.constant([-0.0_5_5_5, 0.4_8_2_5, -0.0_8_5_2] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1e-4 ) )
    @slow
    def lowercase ( self ) -> Any:
        """Check logits shape/values of the distillation (teacher) classification head."""
        __magic_name__ : Optional[int] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            '''snap-research/efficientformer-l1-300''' )
        __magic_name__ : Tuple = self.default_image_processor
        __magic_name__ : Union[str, Any] = prepare_img()
        __magic_name__ : Optional[Any] = image_processor(images=lowerCamelCase , return_tensors='''tf''' )
        # forward pass
        __magic_name__ : Optional[int] = model(**lowerCamelCase , training=lowerCamelCase )
        # verify the logits
        __magic_name__ : Optional[int] = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , lowerCamelCase )
        __magic_name__ : Dict = tf.constant([-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1e-4 ) )
| 705 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
lowercase_ = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class A__ ( __SCREAMING_SNAKE_CASE ):
    """Video-classification pipeline: sample frames with decord, classify with the model."""
    # NOTE(review): obfuscation damage — the decorator argument and the base class are both
    # the undefined name ``__SCREAMING_SNAKE_CASE`` (presumably PIPELINE_INIT_ARGS and
    # Pipeline, which are imported above); ``__init__`` and several methods declare
    # duplicate ``lowerCamelCase`` parameters (a SyntaxError); bodies read names
    # (``frame_sampling_rate``, ``num_frames``, ``top_k``, ``video``, ``videoreader``,
    # ``model_inputs`` …) that are never bound because every result is rebound to
    # ``__magic_name__``; and ``np.intaa`` / ``idalabel`` look like mangled ``np.int64`` /
    # ``id2label``. Confirm against the original transformers
    # ``VideoClassificationPipeline`` before use.
    def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> Dict:
        """Initialize the pipeline, require the decord backend, and check the model type."""
        super().__init__(*lowerCamelCase , **lowerCamelCase )
        requires_backends(self , '''decord''' )
        self.check_model_type(lowerCamelCase )
    def lowercase ( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None ) -> List[Any]:
        """Split kwargs into preprocess (frame sampling) and postprocess (top_k) params."""
        __magic_name__ : List[str] = {}
        if frame_sampling_rate is not None:
            __magic_name__ : Optional[int] = frame_sampling_rate
        if num_frames is not None:
            __magic_name__ : Optional[Any] = num_frames
        __magic_name__ : Union[str, Any] = {}
        if top_k is not None:
            __magic_name__ : Union[str, Any] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self , lowerCamelCase , **lowerCamelCase ) -> List[Any]:
        """Classify the given video (path or URL), delegating to the base Pipeline."""
        return super().__call__(lowerCamelCase , **lowerCamelCase )
    def lowercase ( self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=1 ) -> int:
        """Download the video if needed, sample evenly spaced frames, run the processor."""
        if num_frames is None:
            __magic_name__ : Any = self.model.config.num_frames
        if video.startswith('''http://''' ) or video.startswith('''https://''' ):
            __magic_name__ : str = BytesIO(requests.get(lowerCamelCase ).content )
        __magic_name__ : Optional[int] = VideoReader(lowerCamelCase )
        videoreader.seek(0 )
        __magic_name__ : Union[str, Any] = 0
        __magic_name__ : Tuple = num_frames * frame_sampling_rate - 1
        __magic_name__ : Tuple = np.linspace(lowerCamelCase , lowerCamelCase , num=lowerCamelCase , dtype=np.intaa )
        __magic_name__ : Union[str, Any] = videoreader.get_batch(lowerCamelCase ).asnumpy()
        __magic_name__ : List[str] = list(lowerCamelCase )
        __magic_name__ : Tuple = self.image_processor(lowerCamelCase , return_tensors=self.framework )
        return model_inputs
    def lowercase ( self , lowerCamelCase ) -> str:
        """Forward pass through the model."""
        __magic_name__ : Union[str, Any] = self.model(**lowerCamelCase )
        return model_outputs
    def lowercase ( self , lowerCamelCase , lowerCamelCase=5 ) -> Optional[Any]:
        """Softmax the logits and return the top-k (score, label) dicts (PyTorch only)."""
        if top_k > self.model.config.num_labels:
            __magic_name__ : Dict = self.model.config.num_labels
        if self.framework == "pt":
            __magic_name__ : Tuple = model_outputs.logits.softmax(-1 )[0]
            __magic_name__ , __magic_name__ : str = probs.topk(lowerCamelCase )
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''' )
        __magic_name__ : List[str] = scores.tolist()
        __magic_name__ : Tuple = ids.tolist()
        return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowerCamelCase , lowerCamelCase )]
| 336 | 0 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class A(TestCase):
    """Static hygiene checks over the dataset scripts under ``./datasets``.

    Fixes vs. original: the base class ``__a`` was undefined (``TestCase`` is
    imported above), and all three methods were named ``lowerCamelCase__`` —
    shadowing each other — while the test bodies called
    ``self._no_encoding_on_file_open`` and ``self._no_print_statements``,
    which therefore did not exist. Methods are renamed to the names the
    class itself uses.
    """

    def _no_encoding_on_file_open(self, filepath: str):
        """Return a regex match if *filepath* contains an ``open(...)`` call
        with no explicit mode/encoding, else None."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"""(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)""")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        """Return the first real ``print(`` call in *filepath* (ignoring
        prints inside comments, strings and triple-quoted docstrings), else
        None."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"""#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()""", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        """Every dataset script must open files with an explicit encoding."""
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        """Dataset scripts must use the datasets logger, not bare print()."""
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 15 |
def merge_sort(collection):
    """Sort *collection* by repeatedly extracting its min and max.

    Each pass removes the smallest and largest remaining element, building a
    prefix of minima and a suffix of maxima; any single leftover element stays
    in the middle. Note: *collection* is consumed (mutated) in the process.

    Fix vs. original: the function was named ``__snake_case`` while the
    ``__main__`` block called ``merge_sort`` — a NameError at runtime.
    """
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
| 441 | 0 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
    """Builds tiny DeBERTa-v2 configs and inputs and checks the TF model heads.

    Fixes vs. original: the class was named ``lowercase_`` (clobbered by a
    later class of the same name) while the sibling test class instantiates
    ``TFDebertaVaModelTester``; ``__init__`` declared every parameter as
    ``_lowercase`` (a SyntaxError) and every method was named ``_lowercase``
    (each definition shadowing the previous). Parameter and method names are
    reconstructed from the attribute assignments and from the call sites in
    the test class below.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create a small random batch and the matching DeBERTa-v2 config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaVaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaVaModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        # Exercise both the list and the dict input conventions.
        inputs_list = [input_ids, input_mask]
        result = model(inputs_list)
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaVaForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaVaForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for the TF DeBERTa-v2 models.

    Fixes vs. original: both mixin bases were the undefined name
    ``lowerCAmelCase__`` (duplicate bases would also raise TypeError) — the
    file imports ``TFModelTesterMixin`` and ``PipelineTesterMixin`` above;
    every class attribute was named ``__UpperCamelCase`` and every method
    ``_lowercase`` (each shadowing the previous); ``config_class=_lowercase``
    referenced an undefined name — ``DebertaVaConfig`` is imported above.
    """

    all_model_classes = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaVaModel,
            "fill-mask": TFDebertaVaForMaskedLM,
            "question-answering": TFDebertaVaForQuestionAnswering,
            "text-classification": TFDebertaVaForSequenceClassification,
            "token-classification": TFDebertaVaForTokenClassification,
            "zero-shot": TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDebertaVaModelIntegrationTest(unittest.TestCase):
    """Slow integration checks against the pretrained checkpoint.

    Fixes vs. original: the class was named ``lowercase_`` (colliding with
    the two classes above) and both methods were named ``_lowercase``
    (the second shadowing the first).
    """

    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        # Reference slice captured from the checkpoint's hidden states.
        expected_slice = tf.constant(
            [[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]])
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
| 334 |
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    """Return True if *item* occurs in the sorted list *a_list*.

    Classic recursive bisection: compare against the midpoint and recurse
    into the half that could contain the item. O(log n) comparisons, though
    the slicing copies make it O(n) overall.

    Fix vs. original: the function was named ``UpperCAmelCase`` while both
    the recursive calls and the ``__main__`` block used ``binary_search`` —
    a NameError at runtime.
    """
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"""{target} was {not_str}found in {sequence}""")
| 334 | 1 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

# Every constant below was originally assigned to the single name `_A`,
# each assignment clobbering the previous one, while the tokenizer classes
# further down reference VOCAB_FILES_NAMES / *_PRETRAINED_* names.
# Restored to the names the classes actually use.

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input lengths (positional embedding sizes) per checkpoint.
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 5_12,
    "facebook/dpr-ctx_encoder-multiset-base": 5_12,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 5_12,
    "facebook/dpr-question_encoder-multiset-base": 5_12,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 5_12,
    "facebook/dpr-reader-multiset-base": 5_12,
}

# Default init kwargs per checkpoint.
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    r"""
    Construct a DPR context-encoder tokenizer, identical to a BERT WordPiece
    tokenizer but pointing at the DPR context-encoder checkpoints.

    Fixes vs. original: the base class was the undefined name
    ``__SCREAMING_SNAKE_CASE`` (``BertTokenizer`` is imported above), the
    class name ``__snake_case`` collided with three other classes in this
    module, and all four class attributes were named ``lowerCamelCase__``
    (each shadowing the previous). Restored to the standard slow-tokenizer
    attribute names consumed by ``PreTrainedTokenizer``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    r"""
    Construct a DPR question-encoder tokenizer, identical to a BERT WordPiece
    tokenizer but pointing at the DPR question-encoder checkpoints.

    Same reconstruction as the context-encoder tokenizer above: undefined
    base ``__SCREAMING_SNAKE_CASE`` replaced by the imported
    ``BertTokenizer``; colliding class name and shadowed ``lowerCamelCase__``
    attributes restored to the standard slow-tokenizer attribute names.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
# Both tuples were originally assigned to `_A` (the second clobbering the
# first) while the reader mixin below constructs `DPRSpanPrediction` by name.
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
# Originally assigned to `_A` (immediately clobbered by later assignments);
# restored to a dedicated name so the reader mixin's decorator can use it.
CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    """Mixin adding DPR-reader-specific encoding and span decoding on top of a
    BERT-style tokenizer.

    Fixes vs. original: ``__call__`` declared every parameter as ``A_`` (a
    SyntaxError from duplicate argument names), every local was assigned to
    ``SCREAMING_SNAKE_CASE__`` (each assignment clobbering the previous), both
    other methods were named ``lowercase_`` (the second shadowing the first)
    while the body calls ``self._get_best_spans``, and the sort key was
    ``lambda A_ : x[1]`` referencing an undefined ``x``. All names are
    reconstructed from the right-hand-side usages. The decorator argument
    was the undefined ``__SCREAMING_SNAKE_CASE``; it now references the
    reader docstring constant.
    """

    def __call__(
        self,
        questions,
        titles=None,
        texts=None,
        padding=False,
        truncation=False,
        max_length=None,
        return_tensors=None,
        return_attention_mask=None,
        **kwargs,
    ):
        # No passages: behave exactly like the plain tokenizer.
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        # Only one of titles/texts given: encode it as the text pair.
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        # A single question is duplicated across all passages.
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input,
        reader_output,
        num_spans=16,
        max_answer_length=64,
        num_spans_per_passage=4,
    ):
        """Decode the best answer spans from reader logits, scanning passages
        in decreasing order of relevance."""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(self, start_logits, end_logits, max_answer_length, top_spans):
        """Score every (start, end) span up to `max_answer_length` and return
        the `top_spans` highest-scoring, mutually non-overlapping intervals."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            # Skip spans overlapping an already-chosen (higher-scoring) span.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
    """DPR reader tokenizer: slow-tokenizer class attributes for the reader
    checkpoints.

    NOTE(review): ``__SCREAMING_SNAKE_CASE`` is not defined anywhere in this
    module, so both the decorator argument and the base classes will raise
    NameError at import time (and two identical bases would raise TypeError).
    Presumably these were the reader docstring constant and the
    (reader-mixin, BertTokenizer) base pair — confirm against the upstream
    DPR tokenizer implementation. The repeated ``lowerCamelCase__`` attribute
    name also means each assignment below shadows the previous one.
    """
    # Standard slow-tokenizer attributes (names mangled; values reference the
    # reader-checkpoint constants defined earlier in this module).
    lowerCamelCase__ : Any = VOCAB_FILES_NAMES
    lowerCamelCase__ : Dict = READER_PRETRAINED_VOCAB_FILES_MAP
    lowerCamelCase__ : Any = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCamelCase__ : Optional[Any] = READER_PRETRAINED_INIT_CONFIGURATION
    lowerCamelCase__ : str = ["""input_ids""", """attention_mask"""]
| 100 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
# All three module constants were originally assigned to the single name
# `lowerCamelCase` (each clobbering the previous); restored to the names the
# tokenizer class below actually uses (`logger`, `VOCAB_FILES_NAMES`).
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class __lowerCAmelCase (PreTrainedTokenizer):
    """SentencePiece tokenizer for CPM with jieba pre-segmentation
    (XLNet-style post-processing).

    Fixes vs. original: the base class was the undefined name ``lowercase_``
    (``PreTrainedTokenizer`` is imported above); ``__init__`` declared every
    parameter as ``UpperCamelCase`` (a SyntaxError from duplicate argument
    names); every method was named ``UpperCamelCase__`` (each definition
    shadowing the previous) and every local was assigned to ``lowercase__``.
    Parameter, method and local names are reconstructed from the
    right-hand-side usages and the standard ``PreTrainedTokenizer`` API.
    """

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # The mask token behaves like a normal word: it absorbs the preceding space.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        # Spaces and newlines are mapped to private block characters before
        # SentencePiece and mapped back in `_decode`.
        self.translator = str.maketrans(" \n", "\u2582\u2583")

    @property
    def vocab_size(self):
        """Size of the SentencePiece vocabulary (without added tokens)."""
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Normalize whitespace, quotes, accents and casing before tokenizing."""
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text):
        """Tokenize with SentencePiece, splitting off trailing digit commas
        the way XLNet does."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the SentencePiece vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the SentencePiece vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """XLNet-style sequence layout: `X <sep> <cls>` or `A <sep> B <sep> <cls>`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Token type ids with the XLNet convention: segment 2 for the cls token."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        """Decode, then undo the space/newline placeholder mapping."""
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
| 460 | 0 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# Make this repo checkout's `src` directory importable without reinstalling.
__magic_name__ : int = abspath(join(dirname(__file__), 'src'))
# fix: `git_repo_path` is undefined in this module — insert the path computed above.
sys.path.insert(1, __magic_name__)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def lowerCAmelCase ( snake_case__ : str )-> Optional[Any]:
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" )
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" )
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" )
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" )
def lowerCAmelCase ( snake_case__ : Any )-> List[Any]:
    """pytest_addoption hook: delegate option registration to the shared transformers helper."""
    from transformers.testing_utils import pytest_addoption_shared as _shared_addoption
    _shared_addoption(snake_case__ )
def lowerCAmelCase ( snake_case__ : Optional[int] )-> Tuple:
    """pytest_terminal_summary hook: write the extra report files when --make-reports is set.

    fix: the body referenced the undefined names ``terminalreporter`` and
    ``make_reports`` instead of the (mangled) parameter and local ``A_``.
    """
    from transformers.testing_utils import pytest_terminal_summary_main
    A_ = snake_case__.config.getoption("--make-reports" )
    if A_:
        pytest_terminal_summary_main(snake_case__ , id=A_ )
def lowerCAmelCase ( snake_case__ : Tuple , snake_case__ : Dict )-> Any:
# If no tests are collected, pytest exists with code 5, which makes the CI fail.
if exitstatus == 5:
A_ = 0
# Doctest custom flag to ignore output.
__magic_name__ : Optional[Any] = doctest.register_optionflag('IGNORE_RESULT')
__magic_name__ : Tuple = doctest.OutputChecker
class lowerCamelCase ( doctest.OutputChecker ):
    """Doctest output checker that treats the IGNORE_RESULT option flag as 'always pass'.

    fix: the original subclassed the undefined name ``__snake_case``, declared
    three method parameters under one mangled name (a SyntaxError), and read
    the undefined ``IGNORE_RESULT``/``OutputChecker`` names.
    """

    # register_optionflag is idempotent, so this retrieves the same flag bit
    # that this module registers under 'IGNORE_RESULT'.
    _ignore_result = doctest.register_optionflag('IGNORE_RESULT')

    def lowercase_ ( self , want , got , optionflags ):
        # When IGNORE_RESULT is requested, accept any output unconditionally.
        if self._ignore_result & optionflags:
            return True
        return doctest.OutputChecker.check_output(self , want , got , optionflags )
# fix: the checker class defined in this module is (mangled) `lowerCamelCase`;
# `CustomOutputChecker` is undefined and raised NameError at import time.
__magic_name__ : str = lowerCamelCase
__magic_name__ : List[str] = HfDoctestModule
__magic_name__ : Tuple = HfDocTestParser
| 608 |
def lowerCAmelCase ( snake_case__ : list )-> list:
    """Sort ``snake_case__`` in place using gnome sort and return it (O(n^2)).

    fix: the original read the undefined names ``lst`` and ``i`` and bound both
    sides of the swap to a single local, so the list was never reordered and the
    loop could not terminate on unsorted input.
    """
    if len(snake_case__ ) <= 1:
        return snake_case__
    i = 1
    while i < len(snake_case__ ):
        if snake_case__[i - 1] <= snake_case__[i]:
            i += 1
        else:
            # Swap the out-of-order pair and step back to re-check the boundary.
            snake_case__[i - 1], snake_case__[i] = snake_case__[i], snake_case__[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return snake_case__
if __name__ == "__main__":
    # Interactive demo: read comma-separated integers and print them sorted.
    # fix: split the same (mangled) variable that was read, and call the
    # gnome-sort defined above — `user_input`, `unsorted` and `gnome_sort`
    # were all undefined names.
    __magic_name__ : Any = input('Enter numbers separated by a comma:\n').strip()
    __magic_name__ : Tuple = [int(item) for item in __magic_name__.split(',')]
    print(lowerCAmelCase(__magic_name__))
| 608 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase : int = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[str] = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 49 |
# Roman-numeral lookup table: (value, numeral) pairs in strictly descending
# value order, including the subtractive forms (CM, CD, XC, XL, IX, IV) so a
# greedy converter can consume it front to back.
snake_case_ = [
    (1_000, '''M'''),
    (900, '''CM'''),
    (500, '''D'''),
    (400, '''CD'''),
    (100, '''C'''),
    (90, '''XC'''),
    (50, '''L'''),
    (40, '''XL'''),
    (10, '''X'''),
    (9, '''IX'''),
    (5, '''V'''),
    (4, '''IV'''),
    (1, '''I'''),
]
def snake_case__ ( SCREAMING_SNAKE_CASE_ : str ):
    '''Parse an uppercase Roman-numeral string and return its integer value.

    fix: the original bound the lookup table, the running total and the cursor
    to one mangled local and then read the undefined names `vals`, `total`,
    `place` and `roman`.
    '''
    vals = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1_000}
    total = 0
    place = 0
    while place < len(SCREAMING_SNAKE_CASE_ ):
        # A smaller value before a larger one is a subtractive pair (IV, XC, ...).
        if (place + 1 < len(SCREAMING_SNAKE_CASE_ )) and (vals[SCREAMING_SNAKE_CASE_[place]] < vals[SCREAMING_SNAKE_CASE_[place + 1]]):
            total += vals[SCREAMING_SNAKE_CASE_[place + 1]] - vals[SCREAMING_SNAKE_CASE_[place]]
            place += 2
        else:
            total += vals[SCREAMING_SNAKE_CASE_[place]]
            place += 1
    return total
def snake_case__ ( SCREAMING_SNAKE_CASE_ : int ):
    '''Convert a positive integer to its Roman-numeral string, greedily
    consuming the descending table `snake_case_` defined above.

    fix: the original called `divmod(n, n)` (always `(1, 0)`), unpacked both
    results into one mangled local, and iterated the undefined name `ROMAN`
    instead of the module table `snake_case_`.
    '''
    result = []
    for arabic, roman in snake_case_:
        # Divide by the table value; carry the remainder into the next entry.
        factor, SCREAMING_SNAKE_CASE_ = divmod(SCREAMING_SNAKE_CASE_ , arabic )
        result.append(roman * factor )
        if SCREAMING_SNAKE_CASE_ == 0:
            break
    return "".join(result )
if __name__ == "__main__":
    # Run the embedded doctests when this module is executed directly.
    import doctest
    doctest.testmod()
| 164 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Force deterministic torch/CUDA kernels so the pixel-level assertions below are stable.
enable_full_determinism()
class _lowercase ( __a , unittest.TestCase ):
    """Fast, dummy-sized unit tests for ``KandinskyVaaControlnetImgaImgPipeline``.

    NOTE(review): name mangling collapsed distinct identifiers — every class
    attribute rebinds ``_UpperCAmelCase``, every method is ``UpperCamelCase``,
    and several bodies assign to ``snake_case`` but then read other names
    (``model``, ``unet``, ``seed``, ``inputs`` ...) that are undefined as
    written.  Comments below describe the apparent intent; confirm against the
    original diffusers test module before relying on them.
    """
    # pipeline class under test, call/batch params, per-call kwargs, XFormers flag
    _UpperCAmelCase = KandinskyVaaControlnetImgaImgPipeline
    _UpperCAmelCase = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
    _UpperCAmelCase = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
    _UpperCAmelCase = [
        '''generator''',
        '''height''',
        '''width''',
        '''strength''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    _UpperCAmelCase = False
    @property
    def UpperCamelCase ( self ) -> Any:
        # dummy embedding width
        return 32
    @property
    def UpperCamelCase ( self ) -> Optional[int]:
        return 32
    @property
    def UpperCamelCase ( self ) -> Union[str, Any]:
        return self.time_input_dim
    @property
    def UpperCamelCase ( self ) -> List[Any]:
        return self.time_input_dim * 4
    @property
    def UpperCamelCase ( self ) -> List[str]:
        return 1_00
    @property
    def UpperCamelCase ( self ) -> int:
        # Tiny seeded UNet conditioned on image embeddings plus a control hint.
        torch.manual_seed(0 )
        snake_case = {
            '''in_channels''': 8,
            # Out channels is double in channels because predicts mean and variance
            '''out_channels''': 8,
            '''addition_embed_type''': '''image_hint''',
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
            '''layers_per_block''': 1,
            '''encoder_hid_dim''': self.text_embedder_hidden_size,
            '''encoder_hid_dim_type''': '''image_proj''',
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': None,
        }
        # NOTE(review): `A__` and `model` are undefined here — presumably the
        # config dict above and the constructed UNet; confirm.
        snake_case = UNetaDConditionModel(**A__ )
        return model
    @property
    def UpperCamelCase ( self ) -> Union[str, Any]:
        # Constructor kwargs for a tiny VQ image codec ("movq").
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }
    @property
    def UpperCamelCase ( self ) -> Union[str, Any]:
        # Seeded tiny VQ model built from the kwargs above.
        torch.manual_seed(0 )
        snake_case = VQModel(**self.dummy_movq_kwargs )
        return model
    def UpperCamelCase ( self ) -> Any:
        # Assemble {unet, scheduler, movq} for the pipeline constructor.
        snake_case = self.dummy_unet
        snake_case = self.dummy_movq
        snake_case = {
            '''num_train_timesteps''': 10_00,
            '''beta_schedule''': '''linear''',
            '''beta_start''': 0.0_0_0_8_5,
            '''beta_end''': 0.0_1_2,
            '''clip_sample''': False,
            '''set_alpha_to_one''': False,
            '''steps_offset''': 0,
            '''prediction_type''': '''epsilon''',
            '''thresholding''': False,
        }
        snake_case = DDIMScheduler(**A__ )
        snake_case = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components
    def UpperCamelCase ( self , A__ , A__=0 ) -> Tuple:
        # Build deterministic dummy inputs (embeds, init image, hint, generator).
        # NOTE(review): both parameters share the mangled name `A__` (a
        # SyntaxError as written) — originally device and seed; confirm.
        snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(A__ ) ).to(A__ )
        snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            A__ )
        # create init_image
        snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(A__ ) ).to(A__ )
        snake_case = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        snake_case = Image.fromarray(np.uinta(A__ ) ).convert('''RGB''' ).resize((2_56, 2_56) )
        # create hint
        snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(A__ ) ).to(A__ )
        if str(A__ ).startswith('''mps''' ):
            snake_case = torch.manual_seed(A__ )
        else:
            snake_case = torch.Generator(device=A__ ).manual_seed(A__ )
        snake_case = {
            '''image''': init_image,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''hint''': hint,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''num_inference_steps''': 10,
            '''guidance_scale''': 7.0,
            '''strength''': 0.2,
            '''output_type''': '''np''',
        }
        return inputs
    def UpperCamelCase ( self ) -> int:
        # CPU smoke test: run the pipeline via both the dict and the tuple
        # return paths and compare a 3x3 corner slice to golden values.
        snake_case = '''cpu'''
        snake_case = self.get_dummy_components()
        snake_case = self.pipeline_class(**A__ )
        snake_case = pipe.to(A__ )
        pipe.set_progress_bar_config(disable=A__ )
        snake_case = pipe(**self.get_dummy_inputs(A__ ) )
        snake_case = output.images
        snake_case = pipe(
            **self.get_dummy_inputs(A__ ) , return_dict=A__ , )[0]
        snake_case = image[0, -3:, -3:, -1]
        snake_case = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        snake_case = np.array(
            [0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
    """GPU integration test: prior + controlnet-depth img2img vs. a golden image.

    NOTE(review): name mangling collapsed identifiers — assignments bind
    ``snake_case`` while later lines read other names (``init_image``,
    ``hint``, ``pipe_prior``, ``pipeline``, ``output`` ...), which are
    undefined as written; confirm against the original diffusers test module.
    """
    def UpperCamelCase ( self ) -> Tuple:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def UpperCamelCase ( self ) -> Optional[Any]:
        # Load golden output + inputs from the Hub, run the prior and the
        # controlnet img2img pipelines at 512x512, then compare pixels.
        snake_case = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
        snake_case = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
        snake_case = init_image.resize((5_12, 5_12) )
        snake_case = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/hint_image_cat.png''' )
        snake_case = torch.from_numpy(np.array(A__ ) ).float() / 2_55.0
        snake_case = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
        snake_case = '''A robot, 4k photo'''
        snake_case = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
        pipe_prior.to(A__ )
        snake_case = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
        snake_case = pipeline.to(A__ )
        pipeline.set_progress_bar_config(disable=A__ )
        snake_case = torch.Generator(device='''cpu''' ).manual_seed(0 )
        snake_case , snake_case = pipe_prior(
            A__ , image=A__ , strength=0.8_5 , generator=A__ , negative_prompt='''''' , ).to_tuple()
        snake_case = pipeline(
            image=A__ , image_embeds=A__ , negative_image_embeds=A__ , hint=A__ , generator=A__ , num_inference_steps=1_00 , height=5_12 , width=5_12 , strength=0.5 , output_type='''np''' , )
        snake_case = output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        assert_mean_pixel_difference(A__ , A__ )
| 709 |
'''simple docstring'''
def __UpperCamelCase ( a : int , a : int ) ->int:
while b:
snake_case , snake_case = b, a % b
return a
def __UpperCamelCase ( a : int , a : int ) ->int:
return a if b == 0 else euclidean_gcd_recursive(a , a % b )
def __UpperCamelCase ( ) ->Optional[Any]:
    """Demo driver: print gcd results for a few sample pairs.

    NOTE(review): ``euclidean_gcd`` and ``euclidean_gcd_recursive`` are not
    defined in this module (both implementations above are named
    ``__UpperCamelCase``), so every call below raises NameError as written —
    confirm the intended names.
    """
    print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
    print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
    print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
    print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
    print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
    print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
    print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
    print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
    print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
    print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
    # NOTE(review): `main` is also undefined here (the driver above is mangled
    # to `__UpperCamelCase`); confirm.
    main()
| 44 | 0 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
# Module-level logger (transformers logging wrapper).
lowercase_ = logging.get_logger(__name__)
class A_ ( __UpperCamelCase ):
    '''Audio feature extractor: converts raw mono speech into log-mel
    spectrogram patches (``audio_values``) plus a patch-level attention mask
    (``audio_mask``).

    NOTE(review): name mangling collapsed identifiers — every ``__init__``
    parameter is named ``a`` (a SyntaxError as written) and several
    assignments bind ``__lowerCamelCase`` while later lines read other names
    (``spectrogram_length``, ``n_fft``, ``log_spec``, ``raw_speech`` ...).
    Comments describe the apparent intent; confirm against the original module.
    '''
    # Keys returned in the BatchFeature produced by __call__.
    __snake_case = ["""audio_values""", """audio_mask"""]
    def __init__( self: Any , a: str=2048 , a: List[Any]=1 , a: int=[16, 16] , a: str=128 , a: int=4_4100 , a: str=86 , a: Any=2048 , a: Optional[int]=0.0 , **a: Any , ):
        super().__init__(
            feature_size=a , sampling_rate=a , padding_value=a , **a , )
        __lowerCamelCase : List[str] = spectrogram_length
        __lowerCamelCase : Optional[Any] = num_channels
        __lowerCamelCase : str = patch_size
        __lowerCamelCase : int = feature_size // self.patch_size[1]
        __lowerCamelCase : Optional[int] = n_fft
        __lowerCamelCase : Optional[Any] = sampling_rate // hop_length_to_sampling_rate
        __lowerCamelCase : Union[str, Any] = sampling_rate
        __lowerCamelCase : Optional[int] = padding_value
        # Slaney-normalized mel filter bank, transposed for use in spectrogram().
        __lowerCamelCase : List[Any] = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=a , norm='slaney' , mel_scale='slaney' , ).T
    def _snake_case ( self: List[str] , a: np.array ):
        # Compute a dB-scaled log-mel spectrogram and rescale it into [-1, 1].
        __lowerCamelCase : Optional[int] = spectrogram(
            a , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=8_0.0 , )
        __lowerCamelCase : List[Any] = log_spec[:, :-1]
        __lowerCamelCase : List[str] = log_spec - 2_0.0
        __lowerCamelCase : Any = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0
        return log_spec
    def __call__( self: Dict , a: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a: Optional[Union[str, TensorType]] = None , a: Optional[bool] = True , a: Optional[int] = None , a: bool = False , a: bool = False , **a: Optional[Any] , ):
        '''Featurize raw speech: validate the sampling rate, normalize the
        input to a batch of mono waveforms, extract time-truncated log-mel
        features, then pad them into a fixed-size batch with an optional
        patch-level attention mask.'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    'This feature extractor is set to support sampling rate'
                    F' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
                    F' with {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        __lowerCamelCase : int = isinstance(a , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
        __lowerCamelCase : List[str] = is_batched_numpy or (
            isinstance(a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            __lowerCamelCase : Union[str, Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
        elif not is_batched and not isinstance(a , np.ndarray ):
            __lowerCamelCase : Optional[int] = np.asarray(a , dtype=np.floataa )
        elif isinstance(a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            __lowerCamelCase : int = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            __lowerCamelCase : Any = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        __lowerCamelCase : Optional[int] = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , a ):
            __lowerCamelCase : Union[str, Any] = [np.asarray(a , dtype=np.floataa ) for feature in audio_features]
        # Create audio attention mask
        __lowerCamelCase : int = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
        if return_attention_mask:
            __lowerCamelCase : Optional[int] = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            __lowerCamelCase : Any = np.array(a ).astype(np.floataa )
        # convert into correct format for padding
        __lowerCamelCase : Any = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
        __lowerCamelCase : Tuple = np.ones([len(a ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
        __lowerCamelCase : int = padded_audio_features * self.padding_value
        for i in range(len(a ) ):
            __lowerCamelCase : Any = audio_features[i]
            __lowerCamelCase : Any = feature
        # return as BatchFeature
        if return_attention_mask:
            __lowerCamelCase : Union[str, Any] = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
        else:
            __lowerCamelCase : Dict = {'audio_values': padded_audio_features}
        __lowerCamelCase : str = BatchFeature(data=a , tensor_type=a )
        return encoded_inputs
| 669 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( __UpperCamelCase ):
    '''Scheduler-suite tests for ``CMStochasticIterativeScheduler`` (consistency models).

    NOTE(review): name mangling collapsed identifiers — many assignments bind
    ``__lowerCamelCase`` while later lines read other names (``config``,
    ``scheduler``, ``model``, ``sample`` ...), which are undefined as written.
    Comments describe the apparent intent; confirm against the original test.
    '''
    # Scheduler classes under test and default number of inference steps.
    __snake_case = (CMStochasticIterativeScheduler,)
    __snake_case = 10
    def _snake_case ( self: Any , **a: Dict ):
        # Default scheduler config, overridable via keyword arguments.
        __lowerCamelCase : Optional[Any] = {
            'num_train_timesteps': 201,
            'sigma_min': 0.0_0_2,
            'sigma_max': 8_0.0,
        }
        config.update(**a )
        return config
    def _snake_case ( self: List[Any] ):
        # Two consecutive steps must preserve the sample shape.
        __lowerCamelCase : Any = 10
        __lowerCamelCase : Any = self.get_scheduler_config()
        __lowerCamelCase : Union[str, Any] = self.scheduler_classes[0](**a )
        scheduler.set_timesteps(a )
        __lowerCamelCase : Any = scheduler.timesteps[0]
        __lowerCamelCase : List[str] = scheduler.timesteps[1]
        __lowerCamelCase : Union[str, Any] = self.dummy_sample
        __lowerCamelCase : int = 0.1 * sample
        __lowerCamelCase : Optional[Any] = scheduler.step(a , a , a ).prev_sample
        __lowerCamelCase : List[str] = scheduler.step(a , a , a ).prev_sample
        self.assertEqual(output_a.shape , sample.shape )
        self.assertEqual(output_a.shape , output_a.shape )
    def _snake_case ( self: Optional[Any] ):
        # Config sweep over several training-timestep counts.
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=a )
    def _snake_case ( self: List[str] ):
        # Config sweep over the clip_denoised flag.
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=a )
    def _snake_case ( self: Tuple ):
        # Full denoising loop with the default schedule; checks the aggregate
        # sum/mean of the final sample against golden values.
        __lowerCamelCase : Tuple = self.scheduler_classes[0]
        __lowerCamelCase : Tuple = self.get_scheduler_config()
        __lowerCamelCase : Tuple = scheduler_class(**a )
        __lowerCamelCase : int = 1
        scheduler.set_timesteps(a )
        __lowerCamelCase : Optional[int] = scheduler.timesteps
        __lowerCamelCase : List[str] = torch.manual_seed(0 )
        __lowerCamelCase : Union[str, Any] = self.dummy_model()
        __lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(a ):
            # 1. scale model input
            __lowerCamelCase : List[str] = scheduler.scale_model_input(a , a )
            # 2. predict noise residual
            __lowerCamelCase : Optional[int] = model(a , a )
            # 3. predict previous sample x_t-1
            __lowerCamelCase : str = scheduler.step(a , a , a , generator=a ).prev_sample
            __lowerCamelCase : str = pred_prev_sample
        __lowerCamelCase : List[str] = torch.sum(torch.abs(a ) )
        __lowerCamelCase : str = torch.mean(torch.abs(a ) )
        assert abs(result_sum.item() - 1_9_2.7_6_1_4 ) < 1e-2
        assert abs(result_mean.item() - 0.2_5_1_0 ) < 1e-3
    def _snake_case ( self: Optional[Any] ):
        # Same denoising loop with an explicit custom schedule [106, 0].
        __lowerCamelCase : Any = self.scheduler_classes[0]
        __lowerCamelCase : Optional[Any] = self.get_scheduler_config()
        __lowerCamelCase : int = scheduler_class(**a )
        __lowerCamelCase : List[Any] = [106, 0]
        scheduler.set_timesteps(timesteps=a )
        __lowerCamelCase : Dict = scheduler.timesteps
        __lowerCamelCase : int = torch.manual_seed(0 )
        __lowerCamelCase : Any = self.dummy_model()
        __lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            __lowerCamelCase : Tuple = scheduler.scale_model_input(a , a )
            # 2. predict noise residual
            __lowerCamelCase : Tuple = model(a , a )
            # 3. predict previous sample x_t-1
            __lowerCamelCase : Any = scheduler.step(a , a , a , generator=a ).prev_sample
            __lowerCamelCase : Any = pred_prev_sample
        __lowerCamelCase : Dict = torch.sum(torch.abs(a ) )
        __lowerCamelCase : Optional[Any] = torch.mean(torch.abs(a ) )
        assert abs(result_sum.item() - 3_4_7.6_3_5_7 ) < 1e-2
        assert abs(result_mean.item() - 0.4_5_2_7 ) < 1e-3
    def _snake_case ( self: Tuple ):
        # Non-descending custom timesteps must be rejected.
        __lowerCamelCase : Optional[int] = self.scheduler_classes[0]
        __lowerCamelCase : int = self.get_scheduler_config()
        __lowerCamelCase : List[Any] = scheduler_class(**a )
        __lowerCamelCase : Optional[Any] = [39, 30, 12, 15, 0]
        with self.assertRaises(a , msg='`timesteps` must be in descending order.' ):
            scheduler.set_timesteps(timesteps=a )
    def _snake_case ( self: int ):
        # Passing both num_inference_steps and timesteps must be rejected.
        __lowerCamelCase : Any = self.scheduler_classes[0]
        __lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
        __lowerCamelCase : Union[str, Any] = scheduler_class(**a )
        __lowerCamelCase : Optional[int] = [39, 30, 12, 1, 0]
        __lowerCamelCase : List[Any] = len(a )
        with self.assertRaises(a , msg='Can only pass one of `num_inference_steps` or `timesteps`.' ):
            scheduler.set_timesteps(num_inference_steps=a , timesteps=a )
    def _snake_case ( self: Optional[Any] ):
        # Timesteps at/after num_train_timesteps must be rejected.
        __lowerCamelCase : Optional[int] = self.scheduler_classes[0]
        __lowerCamelCase : Dict = self.get_scheduler_config()
        __lowerCamelCase : Union[str, Any] = scheduler_class(**a )
        __lowerCamelCase : Optional[int] = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            a , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
            scheduler.set_timesteps(timesteps=a )
| 669 | 1 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase_ ( lowercase , unittest.TestCase ):
    """Tokenizer-suite tests for LxmertTokenizer (slow + fast, WordPiece vocab).

    NOTE(review): name mangling collapsed identifiers — assignments bind
    ``_UpperCamelCase`` while later lines read other names (``vocab_tokens``,
    ``self.vocab_file``, ``tokenizer`` ...), which are undefined as written.
    Confirm against the original transformers test module.
    """
    # Tokenizer classes under test and flags enabling the slow/fast sub-suites.
    __lowercase : Dict = LxmertTokenizer
    __lowercase : Optional[Any] = LxmertTokenizerFast
    __lowercase : Optional[int] = True
    __lowercase : Tuple = True
    def lowercase ( self ) -> str:
        """Write a tiny WordPiece vocabulary file into the temp test directory."""
        super().setUp()
        _UpperCamelCase = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        _UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def lowercase ( self , lowerCamelCase_ ) -> int:
        """Return an (input, expected-output) text pair for round-trip tests."""
        _UpperCamelCase = "UNwant\u00E9d,running"
        _UpperCamelCase = "unwanted, running"
        return input_text, output_text
    def lowercase ( self ) -> Union[str, Any]:
        """Tokenize with the slow tokenizer and check tokens and ids."""
        _UpperCamelCase = self.tokenizer_class(self.vocab_file )
        _UpperCamelCase = tokenizer.tokenize("UNwant\u00E9d,running" )
        self.assertListEqual(lowerCamelCase_ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [7, 4, 5, 10, 8, 9] )
    def lowercase ( self ) -> int:
        """Slow and fast tokenizers must agree on tokens and encodings."""
        if not self.test_rust_tokenizer:
            return
        _UpperCamelCase = self.get_tokenizer()
        _UpperCamelCase = self.get_rust_tokenizer()
        _UpperCamelCase = "I was born in 92000, and this is falsé."
        _UpperCamelCase = tokenizer.tokenize(lowerCamelCase_ )
        _UpperCamelCase = rust_tokenizer.tokenize(lowerCamelCase_ )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
        _UpperCamelCase = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
        _UpperCamelCase = rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
        _UpperCamelCase = self.get_rust_tokenizer()
        _UpperCamelCase = tokenizer.encode(lowerCamelCase_ )
        _UpperCamelCase = rust_tokenizer.encode(lowerCamelCase_ )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
| 715 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
# Shared SentencePiece model fixture used by the tokenizer tests below.
__lowerCAmelCase = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_ ( lowercase , unittest.TestCase ):
__lowercase : Union[str, Any] = AlbertTokenizer
__lowercase : Any = AlbertTokenizerFast
__lowercase : Dict = True
__lowercase : Union[str, Any] = True
__lowercase : str = True
def lowercase ( self ) -> Dict:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCamelCase = AlbertTokenizer(lowerCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase ( self , lowerCamelCase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = "this is a test"
_UpperCamelCase = "this is a test"
return input_text, output_text
def lowercase ( self ) -> int:
"""simple docstring"""
_UpperCamelCase = "<pad>"
_UpperCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def lowercase ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "▁eloquent" )
self.assertEqual(len(lowerCamelCase_ ) , 3_00_00 )
def lowercase ( self ) -> Any:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00 )
def lowercase ( self ) -> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = "I was born in 92000, and this is falsé."
_UpperCamelCase = tokenizer.tokenize(lowerCamelCase_ )
_UpperCamelCase = rust_tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_UpperCamelCase = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
_UpperCamelCase = rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = tokenizer.encode(lowerCamelCase_ )
_UpperCamelCase = rust_tokenizer.encode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowercase ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = AlbertTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
_UpperCamelCase = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCamelCase_ , ["▁this", "▁is", "▁a", "▁test"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [48, 25, 21, 12_89] )
_UpperCamelCase = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCamelCase_ , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."] )
_UpperCamelCase = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , [31, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9] )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."] , )
def lowercase ( self ) -> str:
"""simple docstring"""
_UpperCamelCase = AlbertTokenizer(lowerCamelCase_ )
_UpperCamelCase = tokenizer.encode("sequence builders" )
_UpperCamelCase = tokenizer.encode("multi-sequence build" )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ , lowerCamelCase_ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def lowercase ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_19_70, 13, 5, 60_92, 1_67, 28, 71_03, 21_53, 6_73, 8, 70_28, 1_20_51, 18, 17, 71_03, 21_53, 6_73, 8, 35_15, 1_86_84, 8, 44_61, 6, 19_27, 2_97, 8, 1_20_60, 26_07, 18, 13, 5, 44_61, 15, 1_05_38, 38, 8, 1_35, 15, 8_22, 58, 15, 9_93, 1_03_63, 15, 14_60, 80_05, 44_61, 15, 9_93, 2_55, 23_28, 9, 9, 9, 6, 26, 11_12, 8_16, 32_60, 13, 5, 1_03, 23_77, 6, 17, 11_12, 8_16, 27_82, 13, 5, 1_03, 1_06_41, 6, 29, 84, 25_12, 24_30, 7_82, 1_86_84, 27_61, 19, 8_08, 24_30, 25_56, 17, 8_55, 14_80, 94_77, 40_91, 1_28, 1_17_12, 15, 71_03, 21_53, 6_73, 17, 2_48_83, 99_90, 9, 3], [2, 1_15_02, 25, 10_06, 20, 7_82, 8, 1_18_09, 8_55, 17_32, 1_93_93, 1_86_67, 37, 3_67, 2_10_18, 69, 18_54, 34, 1_18_60, 1_91_24, 27, 1_56, 2_25, 17, 1_93, 41_41, 19, 65, 91_24, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 22_31, 8_86, 23_85, 1_76_59, 84, 14, 1_67_92, 19_52, 9, 3, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name="albert-base-v2" , revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e" , )
| 589 | 0 |
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb):
    """Convert an RGB image of shape (H, W, 3) to grayscale (H, W).

    Uses the ITU-R BT.601 luma weights (0.2989 R + 0.5870 G + 0.1140 B).
    The function name and parameter are restored to match the call site in the
    ``__main__`` block below (the mangled original bound the parameter to ``A__``
    while the body read the undefined name ``rgb``).
    """
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
def gray_to_binary(gray):
    """Binarize a grayscale image: True where the pixel value lies in (127, 255].

    Name and parameter restored to match the call site in the ``__main__`` block
    (the mangled original read the undefined name ``gray``).
    """
    return (gray > 127) & (gray <= 255)
def dilation(image, kernel):
    """Morphological dilation of a binary image with a binary structuring element.

    A pixel in the output is 1 when any pixel under the kernel window is set.

    The mangled original was not runnable: it declared two parameters with the
    same name (a SyntaxError), referenced the undefined names ``__A``/``image``/
    ``kernel``/``summation``, and lost the line that copies ``image`` into the
    padded buffer. This restores the standard implementation.

    :param image: binary image array of shape (H, W)
    :param kernel: binary structuring element of shape (kH, kW)
    :return: dilated binary image, same shape and dtype container as ``image``
    """
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image (roughly centered under the kernel window).
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
if __name__ == "__main__":
    # Demo: dilate the binarized Lena test image and save the result.
    # Variable names restored from their use sites below (the mangled original
    # rebound everything to the placeholder ``lowerCamelCase_``).
    # read original image
    lena_path = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg'
    lena = np.array(Image.open(lena_path))
    # kernel to be applied (4-connected cross structuring element)
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert('RGB')
    pil_img.save('result_dilation.png')
| 95 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase):
    """Tokenization tests for RobertaTokenizer / RobertaTokenizerFast over a tiny BPE
    vocab built on disk in setUp.

    NOTE(review): this file appears machine-mangled. The base name ``UpperCAmelCase_``
    is not defined in this module — the otherwise-unused ``TokenizerTesterMixin``
    import above is almost certainly what was meant. The four identically named
    ``__SCREAMING_SNAKE_CASE`` attributes below shadow one another; the method bodies
    read ``self.tokenizer_class`` / ``self.rust_tokenizer_class`` /
    ``self.special_tokens_map``, so they look like mangled placeholders for the usual
    mixin attributes. Many ``lowercase`` argument placeholders are likewise undefined
    in their scope — confirm each against the upstream test before relying on this.
    """
    __SCREAMING_SNAKE_CASE = RobertaTokenizer
    __SCREAMING_SNAKE_CASE = RobertaTokenizerFast
    __SCREAMING_SNAKE_CASE = True
    __SCREAMING_SNAKE_CASE = {'''cls_token''': '''<s>'''}
    def __lowerCamelCase ( self ) -> Tuple:
        # setUp: write a tiny BPE vocab + merges into tmpdirname for from_pretrained.
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        __UpperCamelCase = [
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """\u0120""",
            """\u0120l""",
            """\u0120n""",
            """\u0120lo""",
            """\u0120low""",
            """er""",
            """\u0120lowest""",
            """\u0120newer""",
            """\u0120wider""",
            """<unk>""",
        ]
        __UpperCamelCase = dict(zip(lowercase , range(len(lowercase ) ) ) )
        __UpperCamelCase = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        __UpperCamelCase = {"""unk_token""": """<unk>"""}
        __UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        __UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(lowercase ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(lowercase ) )
    def __lowerCamelCase ( self , **lowercase ) -> Any:
        # get_tokenizer: slow tokenizer from the tmp vocab, with special-token overrides.
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase )
    def __lowerCamelCase ( self , **lowercase ) -> List[Any]:
        # get_rust_tokenizer: fast-tokenizer counterpart of the method above.
        kwargs.update(self.special_tokens_map )
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **lowercase )
    def __lowerCamelCase ( self , lowercase ) -> Optional[int]:
        # get_input_output_texts: round-trip sample covered by the tiny vocab.
        __UpperCamelCase = """lower newer"""
        __UpperCamelCase = """lower newer"""
        return input_text, output_text
    def __lowerCamelCase ( self ) -> List[str]:
        # Full tokenization against the hand-built vocab; unknown token falls back to <unk>.
        __UpperCamelCase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        __UpperCamelCase = """lower newer"""
        __UpperCamelCase = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
        __UpperCamelCase = tokenizer.tokenize(lowercase ) # , add_prefix_space=True)
        self.assertListEqual(lowercase , lowercase )
        __UpperCamelCase = tokens + [tokenizer.unk_token]
        __UpperCamelCase = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , lowercase )
    def __lowerCamelCase ( self ) -> Any:
        # Spot-check encodings against pinned roberta-base ids.
        __UpperCamelCase = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=lowercase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
        self.assertListEqual(
            tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=lowercase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
    @slow
    def __lowerCamelCase ( self ) -> str:
        # build_inputs_with_special_tokens must agree with encode(add_special_tokens=...).
        __UpperCamelCase = self.tokenizer_class.from_pretrained("""roberta-base""" )
        __UpperCamelCase = tokenizer.encode("""sequence builders""" , add_special_tokens=lowercase )
        __UpperCamelCase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowercase )
        __UpperCamelCase = tokenizer.encode(
            """sequence builders""" , add_special_tokens=lowercase , add_prefix_space=lowercase )
        __UpperCamelCase = tokenizer.encode(
            """sequence builders""" , """multi-sequence build""" , add_special_tokens=lowercase , add_prefix_space=lowercase )
        __UpperCamelCase = tokenizer.build_inputs_with_special_tokens(lowercase )
        __UpperCamelCase = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def __lowerCamelCase ( self ) -> Union[str, Any]:
        # Encoder arguments (add_prefix_space) and spacing behavior around special tokens.
        __UpperCamelCase = self.get_tokenizer()
        __UpperCamelCase = """Encode this sequence."""
        __UpperCamelCase = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
        # Testing encoder arguments
        __UpperCamelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase , add_prefix_space=lowercase )
        __UpperCamelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(lowercase , lowercase )
        __UpperCamelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase , add_prefix_space=lowercase )
        __UpperCamelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(lowercase , lowercase )
        tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
        __UpperCamelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
        __UpperCamelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(lowercase , lowercase )
        # Testing spaces after special tokens
        __UpperCamelCase = """<mask>"""
        tokenizer.add_special_tokens(
            {"""mask_token""": AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase )} ) # mask token has a left space
        __UpperCamelCase = tokenizer.convert_tokens_to_ids(lowercase )
        __UpperCamelCase = """Encode <mask> sequence"""
        __UpperCamelCase = """Encode <mask>sequence"""
        __UpperCamelCase = tokenizer.encode(lowercase )
        __UpperCamelCase = encoded.index(lowercase )
        __UpperCamelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(lowercase , lowercase )
        __UpperCamelCase = tokenizer.encode(lowercase )
        __UpperCamelCase = encoded.index(lowercase )
        __UpperCamelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(lowercase , lowercase )
    def __lowerCamelCase ( self ) -> Any:
        # Intentionally skipped in the original test suite.
        pass
    def __lowerCamelCase ( self ) -> Dict:
        # Fast vs. slow tokenizer parity on a sentence containing <mask>.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
                __UpperCamelCase = self.tokenizer_class.from_pretrained(lowercase , **lowercase )
                __UpperCamelCase = """A, <mask> AllenNLP sentence."""
                __UpperCamelCase = tokenizer_r.encode_plus(lowercase , add_special_tokens=lowercase , return_token_type_ids=lowercase )
                __UpperCamelCase = tokenizer_p.encode_plus(lowercase , add_special_tokens=lowercase , return_token_type_ids=lowercase )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
                __UpperCamelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
                __UpperCamelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
                self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
                self.assertSequenceEqual(
                    lowercase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
                self.assertSequenceEqual(
                    lowercase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
    def __lowerCamelCase ( self ) -> List[Any]:
        # The serialized fast-tokenizer state must reflect add_prefix_space / trim_offsets.
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
            __UpperCamelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            __UpperCamelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , lowercase )
            self.assertEqual(post_processor_state["""add_prefix_space"""] , lowercase )
            self.assertEqual(post_processor_state["""trim_offsets"""] , lowercase )
    def __lowerCamelCase ( self ) -> Optional[Any]:
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                __UpperCamelCase = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
                __UpperCamelCase = f"{text_of_1_token} {text_of_1_token}"
                __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
                    lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
                __UpperCamelCase = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowercase ) + 1, len(lowercase ) + 1 + len(lowercase )) , )
                __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
                    lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
                __UpperCamelCase = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowercase ) + 1, len(lowercase ) + 1 + len(lowercase )) , )
                __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
                    lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
                __UpperCamelCase = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowercase ), len(lowercase ) + 1 + len(lowercase )) , )
                __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
                    lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
                __UpperCamelCase = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowercase ), len(lowercase ) + 1 + len(lowercase )) , )
                __UpperCamelCase = f" {text}"
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
                    lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
                __UpperCamelCase = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowercase ) + 1, 1 + len(lowercase ) + 1 + len(lowercase )) , )
                __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
                    lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
                __UpperCamelCase = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowercase ), 1 + len(lowercase ) + 1 + len(lowercase )) , )
                __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
                    lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
                __UpperCamelCase = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowercase ), 1 + len(lowercase ) + 1 + len(lowercase )) , )
| 601 | 0 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
# Datasets (name + config) that are mirrored on the HF Google Cloud Storage bucket.
# Renamed from the mangled placeholder ``__UpperCAmelCase``: the functions below
# iterate ``DATASETS_ON_HF_GCP``, which was otherwise undefined.
DATASETS_ON_HF_GCP = [
    {"dataset": "wikipedia", "config_name": "20220301.de"},
    {"dataset": "wikipedia", "config_name": "20220301.en"},
    {"dataset": "wikipedia", "config_name": "20220301.fr"},
    {"dataset": "wikipedia", "config_name": "20220301.frr"},
    {"dataset": "wikipedia", "config_name": "20220301.it"},
    {"dataset": "wikipedia", "config_name": "20220301.simple"},
    {"dataset": "snli", "config_name": "plain_text"},
    {"dataset": "eli5", "config_name": "LFQA_reddit"},
    {"dataset": "wiki40b", "config_name": "en"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
    {"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
    """Build the ``parameterized.named_parameters`` entries for the GCS datasets.

    Function and parameter names restored from their use sites: the class decorator
    below calls ``list_datasets_on_hf_gcp_parameters(with_config=...)`` and the body
    read the undefined names ``with_config`` / ``DATASETS_ON_HF_GCP``.

    :param with_config: when True, one test case per (dataset, config) pair;
        when False, one test case per distinct dataset name.
    :return: list of dicts understood by ``parameterized.named_parameters``.
    """
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class UpperCamelCase__ ( TestCase ):
    """For every (dataset, config) mirrored on HF GCS, the pre-computed dataset-info
    file must be downloadable.

    Restored from a mangled original: the base class placeholder is replaced by the
    otherwise-unused ``TestCase`` import, the duplicate ``_A`` parameters (a
    SyntaxError) by ``dataset``/``config_name``, and the undefined ``__A``
    placeholders by the locals the body actually reads.
    """

    # Filled in per test case by parameterized.named_parameters above.
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        """Download the dataset-info JSON for this dataset/config from the GCS mirror."""
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            # The info file lives next to the prepared data on the GCS mirror.
            # NOTE(review): with_hash=False restored from a mangled placeholder — confirm upstream.
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    """Prepare the small wikipedia.frr config from the HF GCS mirror and load it.

    Restored from a mangled original: the parameter is the pytest
    ``tmp_path_factory`` fixture (the body called ``tmp_path_factory.mktemp``),
    the locals are named after their use sites (``builder_instance``, ``ds``),
    and the function now has a ``test_`` prefix so pytest collects it.
    """
    tmp_dir = tmp_path_factory.mktemp('test_hf_gcp') / 'test_wikipedia_simple'
    dataset_module = dataset_module_factory('wikipedia', cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance = builder_cls(
        cache_dir=tmp_dir,
        config_name='20220301.frr',
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    # NOTE(review): the mangled original rebound a throwaway local to None here;
    # disabling ``_download_and_prepare`` matches the comment above — confirm upstream.
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    """Stream the wikipedia.frr config from the HF GCS mirror without preparing it.

    Restored from a mangled original: locals renamed after their use sites
    (``dataset_module``, ``builder_cls``, ``builder_instance``, ``ds``) and the
    ``isinstance`` placeholders replaced with the otherwise-unused
    ``IterableDatasetDict`` / ``IterableDataset`` imports.
    """
    dataset_module = dataset_module_factory('wikipedia', cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance = builder_cls(
        cache_dir=tmp_path,
        config_name='20220301.frr',
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds['train'], IterableDataset)
    assert next(iter(ds['train']))
| 718 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ =MODEL_FOR_MASKED_LM_MAPPING
UpperCAmelCase_ =TF_MODEL_FOR_MASKED_LM_MAPPING
def _UpperCamelCase ( self ) -> int:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''tf''' )
SCREAMING_SNAKE_CASE_ = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(_A , decimals=6 ) , [
{'''sequence''': '''My name is grouped''', '''score''': 2.1E-05, '''token''': 38015, '''token_str''': ''' grouped'''},
{'''sequence''': '''My name is accuser''', '''score''': 2.1E-05, '''token''': 25506, '''token_str''': ''' accuser'''},
] , )
SCREAMING_SNAKE_CASE_ = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(_A , decimals=6 ) , [
{
'''sequence''': '''The largest city in France is grouped''',
'''score''': 2.1E-05,
'''token''': 38015,
'''token_str''': ''' grouped''',
},
{
'''sequence''': '''The largest city in France is accuser''',
'''score''': 2.1E-05,
'''token''': 25506,
'''token_str''': ''' accuser''',
},
] , )
SCREAMING_SNAKE_CASE_ = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(_A , decimals=6 ) , [
{'''sequence''': '''My name is Clara''', '''score''': 2E-05, '''token''': 13606, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Patrick''', '''score''': 2E-05, '''token''': 3499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 1.9E-05, '''token''': 2941, '''token_str''': ''' Te'''},
] , )
@require_torch
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''pt''' )
SCREAMING_SNAKE_CASE_ = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(_A , decimals=6 ) , [
{'''sequence''': '''My name is Maul''', '''score''': 2.2E-05, '''token''': 35676, '''token_str''': ''' Maul'''},
{'''sequence''': '''My name isELS''', '''score''': 2.2E-05, '''token''': 16416, '''token_str''': '''ELS'''},
] , )
SCREAMING_SNAKE_CASE_ = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(_A , decimals=6 ) , [
{
'''sequence''': '''The largest city in France is Maul''',
'''score''': 2.2E-05,
'''token''': 35676,
'''token_str''': ''' Maul''',
},
{'''sequence''': '''The largest city in France isELS''', '''score''': 2.2E-05, '''token''': 16416, '''token_str''': '''ELS'''},
] , )
SCREAMING_SNAKE_CASE_ = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(_A , decimals=6 ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 2.1E-05, '''token''': 3499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 2E-05, '''token''': 2941, '''token_str''': ''' Te'''},
{'''sequence''': '''My name is Clara''', '''score''': 2E-05, '''token''': 13606, '''token_str''': ''' Clara'''},
] , )
SCREAMING_SNAKE_CASE_ = unmasker('''My name is <mask> <mask>''' , top_k=2 )
self.assertEqual(
nested_simplify(_A , decimals=6 ) , [
[
{
'''score''': 2.2E-05,
'''token''': 35676,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is Maul<mask></s>''',
},
{'''score''': 2.2E-05, '''token''': 16416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''},
],
[
{
'''score''': 2.2E-05,
'''token''': 35676,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is<mask> Maul</s>''',
},
{'''score''': 2.2E-05, '''token''': 16416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''},
],
] , )
@require_torch_gpu
def _UpperCamelCase ( self ) -> int:
SCREAMING_SNAKE_CASE_ = pipeline('''fill-mask''' , model='''hf-internal-testing/tiny-random-distilbert''' , device=0 , framework='''pt''' )
# convert model to fp16
pipe.model.half()
SCREAMING_SNAKE_CASE_ = pipe('''Paris is the [MASK] of France.''' )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(_A , _A )
@slow
@require_torch
def _UpperCamelCase ( self ) -> Any:
SCREAMING_SNAKE_CASE_ = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''pt''' )
self.run_large_test(_A )
@slow
@require_tf
def _UpperCamelCase ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''tf''' )
self.run_large_test(_A )
def _UpperCamelCase ( self , _A ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(_A ) , [
{'''sequence''': '''My name is John''', '''score''': 0.008, '''token''': 610, '''token_str''': ''' John'''},
{'''sequence''': '''My name is Chris''', '''score''': 0.007, '''token''': 1573, '''token_str''': ''' Chris'''},
] , )
SCREAMING_SNAKE_CASE_ = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(_A ) , [
{
'''sequence''': '''The largest city in France is Paris''',
'''score''': 0.251,
'''token''': 2201,
'''token_str''': ''' Paris''',
},
{
'''sequence''': '''The largest city in France is Lyon''',
'''score''': 0.214,
'''token''': 12790,
'''token_str''': ''' Lyon''',
},
] , )
SCREAMING_SNAKE_CASE_ = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(_A ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 0.005, '''token''': 3499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Clara''', '''score''': 0.000, '''token''': 13606, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Te''', '''score''': 0.000, '''token''': 2941, '''token_str''': ''' Te'''},
] , )
@require_torch
def _UpperCamelCase ( self ) -> Any:
SCREAMING_SNAKE_CASE_ = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''pt''' )
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
self.run_pipeline_test(_A , [] )
@require_tf
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''tf''' )
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
self.run_pipeline_test(_A , [] )
def _UpperCamelCase ( self , _A , _A , _A ) -> List[str]:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest('''The provided tokenizer has no mask token, (probably reformer or wav2vec2)''' )
SCREAMING_SNAKE_CASE_ = FillMaskPipeline(model=_A , tokenizer=_A )
SCREAMING_SNAKE_CASE_ = [
F'''This is another {tokenizer.mask_token} test''',
]
return fill_masker, examples
def _UpperCamelCase ( self , _A , _A ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = fill_masker.tokenizer
SCREAMING_SNAKE_CASE_ = fill_masker.model
SCREAMING_SNAKE_CASE_ = fill_masker(
F'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
_A , [
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
] , )
SCREAMING_SNAKE_CASE_ = fill_masker([F'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
_A , [
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
] , )
SCREAMING_SNAKE_CASE_ = fill_masker([F'''This is a {tokenizer.mask_token}''', F'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
_A , [
[
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
],
[
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
],
] , )
with self.assertRaises(_A ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(_A ):
fill_masker('''This is''' )
self.run_test_top_k(_A , _A )
self.run_test_targets(_A , _A )
self.run_test_top_k_targets(_A , _A )
self.fill_mask_with_duplicate_targets_and_top_k(_A , _A )
self.fill_mask_with_multiple_masks(_A , _A )
def run_test_targets(self, model, tokenizer):
    """Exercise the `targets` argument of the fill-mask pipeline.

    Verifies that `targets` restricts candidates both as a pipeline
    constructor argument and as a call-time argument, that scores are
    consistent between the two, and that invalid targets raise.
    """
    vocab = tokenizer.get_vocab()
    # Two lexicographically-smallest vocab entries serve as targets.
    targets = sorted(vocab.keys())[:2]
    # Pipeline argument
    fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
    outputs = fill_masker(f"This is a {tokenizer.mask_token}")
    self.assertEqual(
        outputs,
        [
            {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
        ],
    )
    target_ids = {vocab[el] for el in targets}
    self.assertEqual({el["token"] for el in outputs}, target_ids)
    processed_targets = [tokenizer.decode([x]) for x in target_ids]
    self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

    # Call argument
    fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
    outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
    self.assertEqual(
        outputs,
        [
            {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
        ],
    )
    target_ids = {vocab[el] for el in targets}
    self.assertEqual({el["token"] for el in outputs}, target_ids)
    processed_targets = [tokenizer.decode([x]) for x in target_ids]
    self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

    # Score equivalence
    outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
    tokens = [top_mask["token_str"] for top_mask in outputs]
    scores = [top_mask["score"] for top_mask in outputs]
    # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
    if set(tokens) == set(targets):
        unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
        target_scores = [top_mask["score"] for top_mask in unmasked_targets]
        self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

    # Raises with invalid
    with self.assertRaises(ValueError):
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
    # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
    if "" not in tokenizer.get_vocab():
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
    with self.assertRaises(ValueError):
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
def run_test_top_k(self, model, tokenizer):
    """Check that `top_k` behaves the same whether it is given to the
    pipeline constructor or to the call itself."""
    fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
    outputs = fill_masker(f"This is a {tokenizer.mask_token}")
    self.assertEqual(
        outputs,
        [
            {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
        ],
    )

    fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
    outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
    self.assertEqual(
        outputs2,
        [
            {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
        ],
    )
    # Both call styles must produce identical predictions.
    self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
def run_test_top_k_targets(self, model, tokenizer):
    """Check that combining `top_k` with `targets` filters consistently."""
    vocab = tokenizer.get_vocab()
    fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

    # top_k=2, ntargets=3
    targets = sorted(vocab.keys())[:3]
    outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

    # If we use the most probably targets, and filter differently, we should still
    # have the same results
    targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
    # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
    if set(targets2).issubset(targets):
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
        # They should yield exactly the same result
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
    """Duplicate targets must be deduplicated before `top_k` is applied."""
    fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
    vocab = tokenizer.get_vocab()
    # String duplicates + id duplicates
    targets = sorted(vocab.keys())[:3]
    targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
    outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

    # The target list contains duplicates, so we can't output more
    # than them
    self.assertEqual(len(outputs), 3)
def fill_mask_with_multiple_masks(self, model, tokenizer):
    """A sentence with several mask tokens yields one top-k list per mask."""
    fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

    outputs = fill_masker(
        f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
    )
    self.assertEqual(
        outputs,
        [
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        ],
    )
| 597 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazily-populated import structure: submodule name -> list of public names.
# Backend-specific entries are only registered when the backend is installed,
# so importing this package never forces torch/tf/vision to load.
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the else branch
    # installs a lazy proxy instead.
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 565 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
_lowerCAmelCase = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main():
    """Close or nudge stale GitHub issues on huggingface/diffusers.

    Requires a ``GITHUB_TOKEN`` environment variable. Issues carrying any
    label in ``LABELS_TO_EXEMPT`` are never touched.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Newest comment first.
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                """This issue has been automatically marked as stale because it has not had """
                """recent activity. If you think this still needs to be addressed """
                """please comment on this thread.\n\nPlease note that issues that do not follow the """
                """[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
                """are likely to be ignored.""")
            issue.add_to_labels("stale")


# Backward-compatible alias for the previous (generated) function name.
_SCREAMING_SNAKE_CASE = main


if __name__ == "__main__":
    main()
| 565 | 1 |
from manim import *
class __lowerCAmelCase ( __a ):
    # NOTE(review): the base class `__a` is not defined in this module; for a manim
    # animation this would normally be `Scene` -- confirm against the original source.

    def A__ ( self ) -> str:
        '''simple docstring'''
        # NOTE(review): throughout this method the bare name `A__` is used where
        # distinct arguments/locals are clearly intended (e.g. `VGroup(*A__ )`), and
        # every intermediate result is assigned to the same name `_lowercase`. As
        # written `A__` is unresolvable inside the body, so this code appears to have
        # been mechanically renamed from a working original; the comments below
        # describe the evident intent only.
        # Unit squares representing memory cells (filled outline + inner square).
        _lowercase =Rectangle(height=0.5 , width=0.5 )
        _lowercase =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # Two columns of six cells arranged into a "CPU" group.
        _lowercase =[mem.copy() for i in range(6 )]
        _lowercase =[mem.copy() for i in range(6 )]
        _lowercase =VGroup(*A__ ).arrange(A__ , buff=0 )
        _lowercase =VGroup(*A__ ).arrange(A__ , buff=0 )
        _lowercase =VGroup(A__ , A__ ).arrange(A__ , buff=0 )
        _lowercase =Text('CPU' , font_size=24 )
        _lowercase =Group(A__ , A__ ).arrange(A__ , buff=0.5 , aligned_edge=A__ )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(A__ )
        # Single-cell "GPU" group, aligned next to the CPU.
        _lowercase =[mem.copy() for i in range(1 )]
        _lowercase =VGroup(*A__ ).arrange(A__ , buff=0 )
        _lowercase =Text('GPU' , font_size=24 )
        _lowercase =Group(A__ , A__ ).arrange(A__ , buff=0.5 , aligned_edge=A__ )
        gpu.align_to(A__ , A__ )
        gpu.set_x(gpu.get_x() - 1 )
        self.add(A__ )
        # Six-cell "Model" group placed to the right.
        _lowercase =[mem.copy() for i in range(6 )]
        _lowercase =VGroup(*A__ ).arrange(A__ , buff=0 )
        _lowercase =Text('Model' , font_size=24 )
        _lowercase =Group(A__ , A__ ).arrange(A__ , buff=0.5 , aligned_edge=A__ )
        model.move_to([3, -1.0, 0] )
        self.play(
            Create(A__ , run_time=1 ) , Create(A__ , run_time=1 ) , Create(A__ , run_time=1 ) , )
        # Caption and legend explaining the "empty model skeleton" step.
        _lowercase =MarkupText(
            F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
        _lowercase =Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        _lowercase =MarkupText(
            F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )

        step_a.move_to([2, 2, 0] )
        self.play(Write(A__ , run_time=2.5 ) , Write(A__ ) , Write(A__ ) )
        self.add(A__ )
        # Animate one highlighted cell per model block moving onto the CPU cells.
        _lowercase =[]
        _lowercase =[]
        _lowercase =[]
        for i, rect in enumerate(A__ ):
            _lowercase =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(A__ , opacity=0.7 )
            cpu_target.move_to(A__ )
            cpu_target.generate_target()
            # Target cells are shrunk to quarter-width / third-height tiles.
            _lowercase =0.46 / 4
            _lowercase =0.46 / 3
            if i == 0:
                # First tile anchors at the bottom-left corner of the CPU column.
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=A__ )
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
            elif i == 3:
                # Fourth tile starts a second row next to the first tile.
                cpu_target.target.next_to(cpu_targs[0].target , direction=A__ , buff=0.0 )
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target , direction=A__ , buff=0.0 )
            cpu_targs.append(A__ )

            first_animations.append(rect.animate(run_time=0.5 ).set_stroke(A__ ) )
            second_animations.append(MoveToTarget(A__ , run_time=1.5 ) )
        self.play(*A__ )
        self.play(*A__ )
        self.wait()
| 700 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for OpenAI GPT (slow + fast implementations)."""

    # Attribute names are required by TokenizerTesterMixin.
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            'w</w>',
            'r</w>',
            't</w>',
            'lo',
            'low',
            'er</w>',
            'low</w>',
            'lowest</w>',
            'newer</w>',
            'wider</w>',
            '<unk>',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w') as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, 'w') as fp:
            fp.write('\n'.join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        # The slow OpenAI GPT tokenizer has no pad token, so padding must raise.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding='max_length',
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding='max_length',
                )

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
# NOTE(review): this class re-declares the name `__lowerCAmelCase`, shadowing the
# tokenizer test class defined above, and its base `SCREAMING_SNAKE_CASE` is not
# defined anywhere visible in this module -- presumably both were meant to be the
# ftfy/spaCy variant of the OpenAI GPT tokenizer test suite; confirm upstream.
@require_ftfy
@require_spacy
@require_tokenizers
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ):
    # Re-runs the inherited tests unchanged, but only when ftfy, spaCy and
    # tokenizers are all installed (see the require_* decorators above).
    pass
| 380 | 0 |
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
# Path to a small SentencePiece model (with byte fallback) used as a test fixture.
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''')
__SCREAMING_SNAKE_CASE = SAMPLE_VOCAB  # keep the previous (generated) module name as an alias


@require_sentencepiece
@require_tokenizers
class GPTSwaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for GPT-SW3 (SentencePiece with byte fallback)."""

    # Attribute names are required by TokenizerTesterMixin.
    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."], )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]
        # fmt: off
        expected_encoding = {"input_ids": [[6_3_4_2_3, 5, 6_8_1_1, 1_4_9_5_4, 2_8_2, 8_1_6, 3_8_2_1, 6_3_4_6_6, 6_3_4_2_5, 6_3_4_6_2, 1_8, 6_3_9_7_8, 6_7_8, 3_0_1, 1_3_2_0, 6_3_4_2_3, 6_3_4_5_5, 6_3_4_5_8, 1_8, 6_3_9_8_2, 4_2_4_6, 3_9_4_0, 1_9_0_1, 4_7_7_8_9, 5_5_4_7, 1_8_9_9_4], [1_9_6_3_0, 1_1_0_0, 6_3_4_4_6, 1_3_4_2, 6_3_3, 5_4_4, 4_4_8_8, 5_9_3, 5_1_0_2, 2_4_1_6, 6_3_4_9_5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_6_5_2, 4_2_8, 2_6_8, 1_9_3_6, 5_1_5, 2_6_8, 5_8_5_9_3, 2_2_4_1_3, 9_1_0_6, 5_4_6, 2_6_8, 3_3_2_1_3, 6_3_9_7_9, 6_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5_1_3_0, 6_3_4_5_0, 9_2_4, 6_3_4_4_9, 2_2_4_9, 4_0_6_2, 1_5_5_8, 3_1_8, 6_3_5_0_4, 2_1_4_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_0_9, 3_7_7, 2_8_2_7, 2_5_5_9, 3_3_2, 6_5_7_5, 6_3_4_4_3, 2_6_8_0_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="AI-Sweden/gpt-sw3-126m", sequences=sequences, )
| 236 |
'''simple docstring'''
from maths.prime_check import is_prime
def UpperCAmelCase_(__lowercase: int) -> int:
    """Return the twin prime of ``__lowercase``, i.e. ``__lowercase + 2``.

    A twin prime exists when both the number and number + 2 are prime.

    Args:
        __lowercase: the integer to test.

    Returns:
        ``__lowercase + 2`` when both it and ``__lowercase`` are prime,
        otherwise ``-1``.

    Raises:
        TypeError: if the input is not an integer.
    """
    # Validate the type against `int` (the original compared the value to itself).
    if not isinstance(__lowercase, int):
        msg = f'Input value of [number={__lowercase}] must be an integer'
        raise TypeError(msg)
    if is_prime(__lowercase) and is_prime(__lowercase + 2):
        return __lowercase + 2
    return -1
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 236 | 1 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = """RegNetConfig"""

# Base docstring
_CHECKPOINT_FOR_DOC = """facebook/regnet-y-040"""
_EXPECTED_OUTPUT_SHAPE = [1, 10_88, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = """facebook/regnet-y-040"""
_IMAGE_CLASS_EXPECTED_OUTPUT = """tabby, tabby cat"""

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    """facebook/regnet-y-040""",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    """Convolution + batch norm + activation building block of RegNet."""

    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        # `activation=None` means identity (no non-linearity).
        self.activation = ACTaFN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """RegNet "stem": a single strided conv that embeds the pixel values."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                """Make sure that the channel dimension of the pixel values match with the one set in the configuration.""")

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    """1x1 convolution shortcut that projects/downsamples the residual branch."""

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False):
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    """Squeeze-and-Excitation: rescales channels by a learned attention vector."""

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet "X" residual layer: 1x1 -> grouped 3x3 -> 1x1 convolutions."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            # Last conv is linear; the non-linearity is applied after the residual add.
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    """RegNet "Y" residual layer: an X layer with Squeeze-and-Excitation."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            # SE block reduces channels to in_channels // 4 (rounded).
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            # Last conv is linear; the non-linearity is applied after the residual add.
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    """A RegNet stage: `depth` stacked X or Y layers, downsampling in the first."""

    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == """x""" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f'''layers.{i+1}''') for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    """Stack of RegNet stages; optionally collects intermediate hidden states."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f'''stages.{i+1}'''))

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                # Record the input of each stage.
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    """Core RegNet: embedder -> encoder -> global average pooler (NHWC internally, NCHW out).

    NOTE(review): renamed from a mangled placeholder — the model wrappers below
    construct this class as `TFRegNetMainLayer`. `@keras_serializable` requires a
    `config_class` attribute, restored here; `GlobalAveragePoolingaD` looked like
    a mangled `GlobalAveragePooling2D` — verify against the installed TF version.
    """

    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        # keepdims=True keeps a (batch, 1, 1, channels) pooled map.
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ):
        """Forward pass; falls back to config defaults for the output flags."""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class _UpperCamelCase ( _A ):
    '''simple docstring'''
    # NOTE(review): the three class attributes below all share the name
    # `__UpperCamelCase`, so only the last assignment survives at runtime.
    # In the usual pretrained-model pattern these would be three distinct
    # attributes (a config class, a base-model prefix string, and a main
    # input name) — confirm and rename before relying on them.
    __UpperCamelCase : Any = RegNetConfig
    __UpperCamelCase : Union[str, Any] = """regnet"""
    __UpperCamelCase : str = """pixel_values"""

    @property
    def lowerCAmelCase__ ( self : Optional[Any] ):
        # Serving input signature: a batch of `num_channels` x 224 x 224 images.
        # NOTE(review): `tf.floataa` looks like a mangled `tf.float32` — verify.
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
lowerCamelCase_ : Tuple = r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowerCamelCase_ : Tuple = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    """The bare RegNet model outputting raw features without any specific head on top.""" , _A , )
class _UpperCamelCase ( _A ):
    """Bare RegNet model: thin wrapper around `TFRegNetMainLayer`.

    NOTE(review): the mangled source had duplicate parameter names (a
    SyntaxError), never assigned `self.regnet`, and referenced undefined names
    in the decorators; all were reconstructed from how the body uses them.
    """

    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    # NOTE(review): the forward docstring constant is the module-level
    # `lowerCamelCase_` (itself a mangled name) — confirm which constant applies.
    @add_start_docstrings_to_model_forward(lowerCamelCase_)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ):
        """Delegate to the main layer; return a tuple or a pooled model output."""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """ , _A , )
class _UpperCamelCase ( _A ):
    """RegNet with a flatten + dense classification head.

    NOTE(review): the mangled source listed the same base class twice
    (`class C(_A, _A)` — a TypeError at class creation); upstream the second
    base is a sequence-classification-loss mixin, lost in the mangling. It also
    had duplicate parameter names and never assigned `self.classifier` /
    `self.num_labels`; all restored from how the body uses them.
    """

    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(lowerCamelCase_)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: tf.Tensor = None,
        labels: tf.Tensor = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ):
        """Classify pooled RegNet features; compute the loss when `labels` is given."""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 710 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
lowerCamelCase_ : List[str] = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.

    NOTE(review): class and field names restored from their usage later in the
    file (`HfArgumentParser((ModelArguments, ...))`, `model_args.config_name`,
    etc.); defaults were mangled to an undefined `_A` and restored from the
    upstream run_swag example — verify.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    NOTE(review): class and field names restored from their usage later in the
    file (`data_args.train_file`, `data_args.max_seq_length`, ...). The
    validation method was renamed to `__post_init__` so the dataclass machinery
    actually runs it on construction; the mangled name made it dead code.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        # Reject data files that are neither CSV nor JSON early, with a clear message.
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that dynamically pads a batch of multiple-choice features.

    Flattens the per-choice encodings of every example, pads them together with
    `tokenizer.pad`, restores the (batch, num_choices, seq_len) layout, and
    re-attaches the labels as an int64 tensor.

    NOTE(review): class name restored from its usage later in the file
    (`DataCollatorForMultipleChoice(tokenizer=..., ...)`); field names restored
    from the body (`self.padding`, `self.max_length`, ...). `torch.intaa` looked
    like a mangled `torch.int64` — verify.
    """

    tokenizer: "PreTrainedTokenizerBase"
    padding: "Union[bool, str, PaddingStrategy]" = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        # One encoding dict per (example, choice) pair.
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        # Un-flatten back to (batch_size, num_choices, seq_len)
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
def main():
    """Entry point: parse args, load SWAG (or user CSV/JSON files), preprocess, train, evaluate.

    NOTE(review): renamed from a mangled `A__` — the `__main__` guard and the TPU
    spawn helper below both call `main()`. Every local below was reconstructed
    from its later usage (the mangled source bound everything to one placeholder
    name); `fpaa`/`floataa` were de-mangled to `fp16`/`float32`. The original
    `-> Tuple` annotation referenced an unimported name and was dropped.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer.
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        # Repeat each context four times, once per candidate ending.
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs): xla_spawn passes a process index, which is unused.
    # NOTE(review): renamed from a second `A__` that shadowed the entry point
    # (itself mangled to `A__`); both this helper and the guard below call `main()`.
    main()


if __name__ == "__main__":
    main()
| 670 | 0 |
'''simple docstring'''
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name, arrival_time, burst_time, no_of_process):
    """Highest-Response-Ratio-Next scheduling: return each process's turn-around time.

    NOTE(review): renamed from a mangled `a` — the demo block at the bottom of
    the file calls `calculate_turn_around_time`. Local names were reconstructed
    from their later usage. Beware: `arrival_time` is sorted IN PLACE, and
    `process_name`/`burst_time` are rebound to the arrival-sorted order (upstream
    behavior, preserved).
    """
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # First unfinished process (arrival-sorted), used to advance the clock.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time
def calculate_waiting_time(process_name, turn_around_time, burst_time, no_of_process):
    """Return per-process waiting time: turn-around time minus burst time.

    NOTE(review): renamed from a mangled `a` (which collided with the function
    above) — the demo block calls `calculate_waiting_time`. `process_name` is
    unused but kept for signature compatibility with the call site.
    """
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
    # Demo run on five processes; the mangled source bound every value below to
    # one shadowed placeholder name while the calls used the real names.
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )
    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
'''simple docstring'''
def UpperCamelCase ( lowercase_ : int ) -> int:
'''simple docstring'''
if n == 1 or not isinstance(lowercase_ , lowercase_ ):
return 0
elif n == 2:
return 1
else:
lowercase =[0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci term containing at least `n` digits.

    Renamed from a mangled `UpperCamelCase` — `solution` below calls
    `fibonacci_digits_index`. Locals restored from usage.
    """
    digits = 0
    index = 2
    while digits < n:
        index += 1
        # Digit count of the current Fibonacci term.
        digits = len(str(fibonacci(index)))
    return index
def solution(n: int = 1000) -> int:
    """Project Euler 25: index of the first Fibonacci term with `n` digits.

    Renamed from a mangled `UpperCamelCase` — the __main__ guard calls `solution`.
    """
    return fibonacci_digits_index(n)
if __name__ == "__main__":
    # Read the target digit count from stdin and print the first matching index.
    print(solution(int(str(input()).strip())))
| 72 | 0 |
'''simple docstring'''
def lowerCamelCase(UpperCAmelCase__: int = 100) -> int:
    """Project Euler 29: count distinct values of a**b for 2 <= a, b <= n.

    NOTE(review): the __main__ guard below calls this as `solution`; upstream the
    function carries that name. The mangled body referenced an undefined `n` —
    locals restored from usage.
    """
    collect_powers = set()
    upper = UpperCAmelCase__ + 1  # maximum limit (range end is exclusive)
    for a in range(2, upper):
        for b in range(2, upper):
            current_pow = a ** b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)


# Alias matching the name used by the __main__ guard (and the upstream source).
solution = lowerCamelCase
if __name__ == "__main__":
    # Read the range bound from stdin and report the count of distinct powers.
    print('''Number of terms ''', solution(int(str(input()).strip())))
| 712 | '''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
# True when the installed torch predates 1.11; the older torch.onnx.export API
# still accepts `use_external_data_format` / `enable_onnx_checker` (see below).
# Renamed from a mangled placeholder — `onnx_export` reads `is_torch_less_than_1_11`.
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    """Export `model` to ONNX at `output_path`, handling both torch export APIs.

    NOTE(review): renamed from a mangled `lowerCamelCase` — `convert_models`
    below calls `onnx_export(...)` with these keyword names. The mangled source
    collapsed every argument into one placeholder; the mapping was restored from
    the keyword names at the call site.
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    """Export the VAE decoder of a diffusers checkpoint to ONNX.

    NOTE(review): renamed from a mangled `lowerCamelCase` — the __main__ guard
    calls `convert_models(...)`. `torch.floataa` mangles were restored to
    `torch.float16` / `torch.float32`, and the lost `vae_decoder.forward = ...`
    patch (which makes export trace only the decoder) was reinstated — verify
    against the upstream diffusers conversion script.
    """
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    # CLI entry point; the mangled source bound the parser and parsed args to
    # shadowed placeholders while the rest of the block used `parser`/`args`,
    # and referenced a nonexistent `args.fpaa` (argparse defines `fp16`).
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
| 320 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
class lowerCamelCase_ (FlavaImageProcessor):
    """Deprecated alias of `FlavaImageProcessor` kept for backwards compatibility.

    NOTE(review): the mangled base `snake_case__` was undefined; the file imports
    `FlavaImageProcessor` and the deprecation message says to use it, so it was
    restored as the base. Upstream this class is named `FlavaFeatureExtractor`
    (see the warning text) — confirm before renaming. The mangled `*A, **A`
    signature was a SyntaxError; the warning category (lost in mangling) is
    presumably `FutureWarning` per the deprecation convention — verify.
    """

    def __init__(self, *args, **kwargs):
        # Warn on construction, then behave exactly like FlavaImageProcessor.
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 244 | '''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy import structure: submodule name -> list of public names it exports.
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_glpn import GLPNFeatureExtractor
        from .image_processing_glpn import GLPNImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_glpn import (
            GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
            GLPNForDepthEstimation,
            GLPNLayer,
            GLPNModel,
            GLPNPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps only load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 244 | 1 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
a_ : Optional[List[str]] = None
a_ : Optional[int] = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
a_ : Dict = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class Image:
    """Image feature: reads/encodes image data stored as ``{"bytes", "path"}`` structs.

    Args:
        decode: Whether to decode the image data. If ``False``, returns the
            underlying dictionary ``{"path": image_path, "bytes": image_bytes}``.
    """

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value) -> dict:
        """Encode example into a format for Arrow.

        Accepts a file path (str), raw bytes, a (nested) list / numpy array of
        pixel values, a PIL image, or an already-encoded ``{"path", "bytes"}`` dict.
        """
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('To support encoding images, please install \'Pillow\'.')

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get('path') is not None and os.path.isfile(value['path']):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get('path')}
        elif value.get('bytes') is not None or value.get('path') is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get('bytes'), "path": value.get('path')}
        else:
            raise ValueError(
                f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''')

    def decode_example(self, value, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode an encoded example back into a PIL image.

        Args:
            value: dict with keys ``"path"`` and ``"bytes"``.
            token_per_repo_id: optional Hub auth tokens keyed by repo_id, used
                when the path points at a (possibly private) Hub-hosted file.
        """
        if not self.decode:
            raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.')

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('To support decoding images, please install \'Pillow\'.')

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value['path'], value['bytes']
        if bytes_ is None:
            if path is None:
                raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''')
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split('::')[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)['repo_id']
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, 'rb', use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self):
        """Return the feature itself if decodable, else flatten it into raw bytes/path columns."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value('binary'),
                "path": Value('string'),
            }
        )

    def cast_storage(self, storage) -> pa.StructArray:
        """Cast an Arrow array (string paths, binary blobs, structs, or nested pixel lists) to the Image struct type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ['bytes', 'path'], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ['bytes', 'path'], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index('bytes') >= 0:
                bytes_array = storage.field('bytes')
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index('path') >= 0:
                path_array = storage.field('path')
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ['bytes', 'path'], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            # Nested lists of pixel values: encode each one to image bytes.
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))['bytes'] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ['bytes', 'path'], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage) -> pa.StructArray:
        """Embed files pointed to by "path" into "bytes" so the table is self-contained."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, 'rb') as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x['path']) if x['bytes'] is None else x['bytes']) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        # Keep only the basename: the embedded table no longer depends on local paths.
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field('path').to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ['bytes', 'path'], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    """Return (and cache in `_IMAGE_COMPRESSION_FORMATS`) the formats PIL can both open and save."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.')
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image) -> bytes:
    """Serialize a PIL image to bytes, keeping its original format when writable, else a lossless default."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        # PNG for modes it supports, TIFF otherwise (both lossless).
        format = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
    image.save(buffer, format=format)
    return buffer.getvalue()
def encode_pil_image(image) -> dict:
    """Encode a PIL image as {"path", "bytes"}: prefer the on-disk path, else serialize to bytes."""
    if hasattr(image, 'filename') and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    """Encode a numpy array as {"path", "bytes"}, downcasting to a Pillow-compatible dtype if needed.

    Raises:
        TypeError: if the dtype cannot be converted to a dtype Pillow can save.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.')
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype('|u1')
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''')
        if dtype is not dest_dtype:
            warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''')
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''')
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''')

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(objs):
    """Encode a list of image-like objects (paths, arrays, or PIL images) to {"path", "bytes"} dicts.

    The encoder is chosen from the first non-null element; nulls are passed through.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.')
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
| 444 |
from __future__ import annotations

import time

# A path is a list of (y, x) grid coordinates.
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    """A search-tree node: a grid position plus a link back to its parent."""

    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        # Positions are stored (y, x) to match grid[row][col] indexing.
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    """Unidirectional BFS over the module-level `grid`, moving by `delta`."""

    def __init__(self, start, goal):
        # start/goal are given (y, x); Node takes (x, y) first.
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        """Run BFS; return the path to the target, or [start] if unreachable."""
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Return in-bounds, obstacle-free neighbours of `parent`."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent))
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Walk parent links back to the root and return the (y, x) path, start first."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    """Two BFS frontiers expanded in lockstep, one from each end, joined where they meet."""

    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)
            # Each frontier chases the other's current head.
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        """Join the two half-paths; drop the duplicated meeting node from the backward half."""
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print('Unidirectional BFS computation time : ', bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print('Bidirectional BFS computation time : ', bd_bfs_time)
| 444 | 1 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """Rename map for stage `idx`'s patch embedding: (HF checkpoint key, original checkpoint key)."""
    embed = []
    for hf_name, orig_name in (
        ("projection.weight", "proj.weight"),
        ("projection.bias", "proj.bias"),
        ("normalization.weight", "norm.weight"),
        ("normalization.bias", "norm.bias"),
    ):
        embed.append(
            (
                f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.{hf_name}",
                f"stage{idx}.patch_embed.{orig_name}",
            )
        )
    return embed
def attention(idx, cnt):
    """Rename map for transformer block `cnt` of stage `idx`: (HF checkpoint key, original checkpoint key).

    Order matters: callers zip this list against the original state dict, so the
    tuples are emitted in exactly the order of the original hand-written list:
    q/k/v conv projections (conv weight + batch-norm stats), q/k/v linear
    projections, attention output projection, MLP, then the two layer norms.
    """
    hf_prefix = f"cvt.encoder.stages.{idx}.layers.{cnt}"
    orig_prefix = f"stage{idx}.blocks.{cnt}"
    attention_weights = []

    # Depthwise-conv projections for query/key/value: one conv weight plus the
    # full set of batch-norm parameters/statistics each.
    bn_params = ("weight", "bias", "running_mean", "running_var", "num_batches_tracked")
    for name, short in (("query", "q"), ("key", "k"), ("value", "v")):
        conv_proj = f"{hf_prefix}.attention.attention.convolution_projection_{name}.convolution_projection"
        attention_weights.append(
            (f"{conv_proj}.convolution.weight", f"{orig_prefix}.attn.conv_proj_{short}.conv.weight")
        )
        for param in bn_params:
            attention_weights.append(
                (f"{conv_proj}.normalization.{param}", f"{orig_prefix}.attn.conv_proj_{short}.bn.{param}")
            )

    # Linear projections for query/key/value.
    for name, short in (("query", "q"), ("key", "k"), ("value", "v")):
        for param in ("weight", "bias"):
            attention_weights.append(
                (
                    f"{hf_prefix}.attention.attention.projection_{name}.{param}",
                    f"{orig_prefix}.attn.proj_{short}.{param}",
                )
            )

    # Attention output projection, the two MLP layers, and the two layer norms.
    for hf_name, orig_name in (
        ("attention.output.dense", "attn.proj"),
        ("intermediate.dense", "mlp.fc1"),
        ("output.dense", "mlp.fc2"),
        ("layernorm_before", "norm1"),
        ("layernorm_after", "norm2"),
    ):
        for param in ("weight", "bias"):
            attention_weights.append(
                (f"{hf_prefix}.{hf_name}.{param}", f"{orig_prefix}.{orig_name}.{param}")
            )
    return attention_weights
def cls_token(idx):
    """Rename map for the classification token of stage `idx` (only the last stage has one)."""
    token = []
    token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
    return token
def final():
    """Rename map for the final layer norm and the classification head."""
    head = []
    head.append(("layernorm.weight", "norm.weight"))
    head.append(("layernorm.bias", "norm.bias"))
    head.append(("classifier.weight", "head.weight"))
    head.append(("classifier.bias", "head.bias"))
    return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Convert an original CvT checkpoint to the HuggingFace format and save it.

    Args:
        cvt_model: model name, e.g. "cvt-13"/"cvt-21"/"cvt-w24"; the depth
            configuration is inferred from characters 4:6 of the basename.
        image_size: shortest-edge size to configure the image processor with.
        cvt_file_name: path to the original .pth checkpoint.
        pytorch_dump_folder_path: output directory for model + image processor.
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    # Build the full (HF key, original key) rename list, in checkpoint order.
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--cvt_model',
        default='cvt-w24',
        type=str,
        help='Name of the cvt model you\'d like to convert.',
    )
    parser.add_argument(
        '--image_size',
        default=384,
        type=int,
        help='Input Image Size',
    )
    parser.add_argument(
        '--cvt_file_name',
        default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth',
        type=str,
        help='Input Image Size',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )

    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    """Builds the kwargs used to instantiate an ImageGPTImageProcessor in the tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests ImageGPTImageProcessor configuration and (de)serialization.

    Overrides the mixin's json/save-pretrained round-trip tests because the
    "clusters" entry is a numpy array and needs `np.array_equal` comparison.
    """

    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "clusters"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass
def prepare_images():
    """Load two test images from the hf-internal-testing fixtures dataset."""
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")
    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])
    return [image1, image2]
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    """Slow integration test: pixel clustering against the released imagegpt-small processor."""

    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read-only filesystem that exposes one compressed file as a single uncompressed file."""

    root_marker = ""
    protocol: str = (
        ""  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        # Strip the trailing extension (if any) to get the name of the inner file.
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        # Single-entry listing: the archive contains exactly one file.
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f'Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'')
        return self.file.open()
class _snake_case(_A):  # NOTE(review): base `_A` is undefined in this module — presumably the compressed-file base filesystem above; confirm.
    """fsspec filesystem for a single bz2-compressed file."""

    # Fix: all three attributes were previously assigned to the same name
    # (`_A`), so only the last assignment survived; the base `__init__` reads
    # `self.compression`, and fsspec keys off `protocol`/`extension`.
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"
class _snake_case(_A):  # NOTE(review): base `_A` is undefined in this module — presumably the compressed-file base filesystem above; confirm.
    """fsspec filesystem for a single gzip-compressed file."""

    # Fix: all three attributes were previously assigned to the same name
    # (`_A`), so only the last assignment survived; the base `__init__` reads
    # `self.compression`, and fsspec keys off `protocol`/`extension`.
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"
class _snake_case(_A):  # NOTE(review): base `_A` is undefined in this module — presumably the compressed-file base filesystem above; confirm.
    """fsspec filesystem for a single lz4-compressed file."""

    # Fix: all three attributes were previously assigned to the same name
    # (`_A`), so only the last assignment survived; the base `__init__` reads
    # `self.compression`, and fsspec keys off `protocol`/`extension`.
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"
class _snake_case(_A):  # NOTE(review): base `_A` is undefined in this module — presumably the compressed-file base filesystem above; confirm.
    """fsspec filesystem for a single xz-compressed file."""

    # Fix: all three attributes were previously assigned to the same name
    # (`_A`), so only the last assignment survived; the base `__init__` reads
    # `self.compression`, and fsspec keys off `protocol`/`extension`.
    protocol = "xz"
    compression = "xz"
    extension = ".xz"
class _snake_case(_A):  # NOTE(review): base `_A` is undefined in this module — presumably the compressed-file base filesystem above; confirm.
    """fsspec filesystem for a single zstd-compressed file."""

    # Fix: all three attributes were previously assigned to the same name
    # (`_A`), so only the last assignment survived.
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo,
        mode="rb",
        target_protocol=None,
        target_options=None,
        block_size=DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        """Open the zstd file, then patch its ``__enter__`` (see comment below).

        Fixes vs the previous revision: the parameter list reused a single name
        for every argument (a SyntaxError); the saved ``__enter__`` and the
        wrapper class were bound to throwaway names while ``_enter`` and
        ``WrappedFile`` were referenced undefined; ``__next__`` was defined
        under a non-dunder name, breaking the iterator protocol; and the
        patched ``__enter__`` was assigned to a local instead of
        ``self.file.__enter__``.
        """
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            """Delegating proxy whose attributes (notably ``close``) are writable."""

            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
# Conditional re-exports for the ControlNet pipelines: the real classes when
# the optional `torch` + `transformers` dependencies are installed, dummy
# placeholder objects otherwise.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)


try:
    # The PyTorch ControlNet pipelines need both transformers and torch.
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Placeholders that raise an informative error when actually used.
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


# The Flax pipeline additionally requires JAX/Flax to be installed.
if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.