| code (string, 86–54.5k chars) | code_codestyle (int64, 0–371) | style_context (string, 87–49.2k chars) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    # Load the text-to-image unCLIP pipeline and reuse its components for image variation.
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 20 |
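A minimal sketch (not part of the original row) of loading the converted pipeline back; the local path is a hypothetical value for `--dump_path`.

```python
# Hypothetical round-trip check: load the dumped image-variation pipeline.
from diffusers import UnCLIPImageVariationPipeline

pipe = UnCLIPImageVariationPipeline.from_pretrained("./unclip-image-variation")
```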
from typing import Optional

import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """GPT-2 based text decoder that autoregressively decodes a prefix of embedded features into a caption."""

    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        # Optional bottleneck: project the prefix into a hidden dim and back out to the GPT-2 embedding dim.
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)

    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        """Generate one caption per feature via beam search, returning token ids and sequence lengths."""
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_embeds=None,
        device=None,
        input_ids=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                # First step: seed the beams with the top-k continuations.
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                # Later steps: extend each beam and keep the k best length-normalized scores.
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
| 163 | 0 |
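A hypothetical usage sketch of the decoder above; the import path, dimensions, feature tensor, and eos_token_id are illustrative assumptions, not values from this row.

```python
import torch

# Assumed import path; this class ships with diffusers' UniDiffuser pipeline.
from diffusers.pipelines.unidiffuser.modeling_text_decoder import UniDiffuserTextDecoder

# Decode two stand-in CLIP-style prefix features into token ids via beam search.
decoder = UniDiffuserTextDecoder(prefix_length=77, prefix_inner_dim=768, prefix_hidden_dim=64)
features = torch.randn(2, 77, 64)  # (batch, prefix_length, prefix_hidden_dim)
tokens, lengths = decoder.generate_captions(features, eos_token_id=50256, device="cpu")
```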
import unittest

from parameterized import parameterized

from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer


class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))


@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids])).logits
        # Expected mean on dim = -1
        expected_mean = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        expected_slice = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor([input_ids])).logits
        # Expected mean on dim = -1
        expected_mean = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        expected_slice = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_chat_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor([input_ids])).logits
        # Expected mean on dim = -1
        expected_mean = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        expected_slice = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-5, rtol=1e-5)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor([input_ids])).logits

        expected_mean = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
        # fmt: off
        expected_slice = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer's frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi"""
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
| 350 |
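A small illustrative sketch, assumed rather than taken from the test file, of the `rope_scaling` setting that `test_model_rope_scaling` exercises; the tiny model dimensions are arbitrary.

```python
from transformers import LlamaConfig, LlamaModel

config = LlamaConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
)
config.rope_scaling = {"type": "linear", "factor": 10.0}  # "dynamic" is the other supported type
model = LlamaModel(config)  # RoPE positions are now interpolated by the scaling factor
```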
import unittest

import numpy as np

from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    from transformers.models.bert.modeling_flax_bert import (
        FlaxBertForMaskedLM,
        FlaxBertForMultipleChoice,
        FlaxBertForNextSentencePrediction,
        FlaxBertForPreTraining,
        FlaxBertForQuestionAnswering,
        FlaxBertForSequenceClassification,
        FlaxBertForTokenClassification,
        FlaxBertModel,
    )


class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
| 139 | 0 |
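A hedged sketch mirroring `test_model_from_pretrained` above; the printed hidden size assumes the standard `bert-base-cased` checkpoint.

```python
import numpy as np
from transformers import FlaxBertModel

model = FlaxBertModel.from_pretrained("bert-base-cased")  # downloads the checkpoint
outputs = model(np.ones((1, 1), dtype="i4"))
print(outputs.last_hidden_state.shape)  # expected: (1, 1, 768)
```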
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}


class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Keep the backend normalizer in sync with the requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 192 |
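An illustrative use of the fast tokenizer defined above; the sentence-pair segment ids come from `create_token_type_ids_from_sequences`.

```python
from transformers import SqueezeBertTokenizerFast

tok = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
enc = tok("first segment", "second segment")
print(enc["input_ids"])
print(enc["token_type_ids"])  # 0s for [CLS] + segment A + [SEP], 1s for segment B + [SEP]
```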
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
| 192 | 1 |
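A trivial sketch, assumed for illustration, instantiating the config above with its defaults.

```python
from transformers import MgpstrConfig

config = MgpstrConfig()
print(config.model_type, config.image_size, config.max_token_length)  # mgp-str [32, 128] 27
```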
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
| 365 |
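An untested, hedged sketch of a forward pass through the model above; this small configuration follows the dance-diffusion-style setup the defaults appear designed for, and assumes the 16-channel fourier time embedding is concatenated onto the input via `extra_in_channels=16`.

```python
import torch
from diffusers import UNet1DModel

unet = UNet1DModel(sample_size=512, in_channels=2, out_channels=2, extra_in_channels=16)
sample = torch.randn(1, 2, 512)                          # (batch, channels, length)
out = unet(sample, timestep=torch.tensor([10])).sample   # same shape as the input
```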
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 300 | 0 |
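A short sketch (an assumption about usage, not from the row) of what the `_LazyModule` pattern above buys: importing the package is cheap, and heavy submodules load only on first attribute access.

```python
import transformers.models.beit as beit  # fast: no torch/flax modeling code loaded yet

model_cls = beit.BeitForImageClassification  # the real modeling module is imported here
```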
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_lowercase : int = '''true'''
def snake_case__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any]=82 , __lowerCamelCase : Optional[Any]=16 ):
"""simple docstring"""
set_seed(42 )
lowerCamelCase__ : Dict =RegressionModel()
lowerCamelCase__ : List[str] =deepcopy(A_ )
lowerCamelCase__ : Optional[Any] =RegressionDataset(length=A_ )
lowerCamelCase__ : List[Any] =DataLoader(A_ , batch_size=A_ )
model.to(accelerator.device )
lowerCamelCase__ : str =accelerator.prepare(A_ , A_ )
return model, ddp_model, dataloader
def snake_case__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Tuple=False ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] =AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
lowerCamelCase__ : Union[str, Any] =load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(__lowerCamelCase : Optional[Any] ):
lowerCamelCase__ : List[str] =tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=A_ , max_length=A_ )
return outputs
with accelerator.main_process_first():
lowerCamelCase__ : List[str] =dataset.map(
A_ , batched=A_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
lowerCamelCase__ : Dict =tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__lowerCamelCase : Tuple ):
if use_longest:
return tokenizer.pad(A_ , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(A_ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return DataLoader(A_ , shuffle=A_ , collate_fn=A_ , batch_size=16 )
def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : Dict ):
"""simple docstring"""
lowerCamelCase__ : List[Any] =Accelerator(dispatch_batches=A_ , split_batches=A_ )
lowerCamelCase__ : Optional[int] =get_dataloader(A_ , not dispatch_batches )
lowerCamelCase__ : Dict =AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=A_ )
lowerCamelCase__ : Dict =accelerator.prepare(A_ , A_ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def snake_case__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Dict ):
"""simple docstring"""
lowerCamelCase__ : str =[]
for batch in dataloader:
lowerCamelCase__ : Optional[Any] =batch.values()
with torch.no_grad():
lowerCamelCase__ : str =model(A_ )
lowerCamelCase__ : int =accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
lowerCamelCase__ : str =[], []
for logit, targ in logits_and_targets:
logits.append(A_ )
targs.append(A_ )
lowerCamelCase__ : List[Any] =torch.cat(A_ ), torch.cat(A_ )
return logits, targs
def snake_case__ ( __lowerCamelCase : Any , __lowerCamelCase : List[Any]=82 , __lowerCamelCase : int=False , __lowerCamelCase : Dict=False , __lowerCamelCase : List[str]=16 ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] =get_basic_setup(A_ , A_ , A_ )
lowerCamelCase__ : Dict =generate_predictions(A_ , A_ , A_ )
assert (
len(A_ ) == num_samples
), f'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(A_ )}'''
def snake_case__ ( __lowerCamelCase : Optional[int] = False , __lowerCamelCase : List[str] = False ):
"""simple docstring"""
lowerCamelCase__ : List[Any] =evaluate.load('''glue''' , '''mrpc''' )
lowerCamelCase__ : int =get_mrpc_setup(A_ , A_ )
# First do baseline
lowerCamelCase__ : int =setup["no"]
model.to(A_ )
model.eval()
for batch in dataloader:
batch.to(A_ )
with torch.inference_mode():
lowerCamelCase__ : Any =model(**A_ )
lowerCamelCase__ : Optional[int] =outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=A_ , references=batch['''labels'''] )
lowerCamelCase__ : List[str] =metric.compute()
# Then do distributed
lowerCamelCase__ : List[Any] =setup["ddp"]
model.eval()
for batch in dataloader:
with torch.inference_mode():
lowerCamelCase__ : int =model(**A_ )
lowerCamelCase__ : str =outputs.logits.argmax(dim=-1 )
lowerCamelCase__ : Optional[Any] =batch["labels"]
lowerCamelCase__ : Optional[int] =accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=A_ , references=A_ )
lowerCamelCase__ : Tuple =metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def snake_case__ ( ):
"""simple docstring"""
lowerCamelCase__ : List[str] =Accelerator(split_batches=A_ , dispatch_batches=A_ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(A_ , A_ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
lowerCamelCase__ : int =Accelerator(split_batches=A_ , dispatch_batches=A_ )
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(A_ , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
lowerCamelCase__ : Dict =Accelerator()
test_torch_metrics(A_ , 512 )
accelerator.state._reset_state()
def snake_case__ ( __lowerCamelCase : Any ):
"""simple docstring"""
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 238 |
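A minimal single-process sketch of the `gather_for_metrics` pattern the script above tests; the tensors are stand-ins, and in a distributed run the call also drops samples that the sampler duplicated to pad the last batch.

```python
import torch
from accelerate import Accelerator

accelerator = Accelerator()
preds = torch.zeros(4)  # stand-in per-process predictions
refs = torch.zeros(4)   # stand-in per-process references
preds, refs = accelerator.gather_for_metrics((preds, refs))  # deduplicated across processes
```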
import torch

from diffusers import DDIMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
| 149 | 0 |
import math
def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)
    else:
        raise ValueError("Mode must start with 'e' or 'd'")

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        if (col == num_cols) or (
            (col == num_cols - 1) and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1

    return "".join(plain_text)
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 232 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class snake_case_ ( __lowercase ):
A_ = 'unispeech-sat'
def __init__( self : str , _snake_case : List[Any]=32 , _snake_case : Union[str, Any]=768 , _snake_case : Tuple=12 , _snake_case : Optional[int]=12 , _snake_case : Optional[Any]=3072 , _snake_case : Tuple="gelu" , _snake_case : int=0.1 , _snake_case : List[Any]=0.1 , _snake_case : Union[str, Any]=0.1 , _snake_case : str=0.0 , _snake_case : List[str]=0.0 , _snake_case : int=0.1 , _snake_case : Optional[Any]=0.1 , _snake_case : Optional[Any]=0.02 , _snake_case : int=1E-5 , _snake_case : Dict="group" , _snake_case : Optional[Any]="gelu" , _snake_case : Optional[Any]=(512, 512, 512, 512, 512, 512, 512) , _snake_case : int=(5, 2, 2, 2, 2, 2, 2) , _snake_case : int=(10, 3, 3, 3, 3, 2, 2) , _snake_case : Any=False , _snake_case : Optional[Any]=128 , _snake_case : Tuple=16 , _snake_case : str=False , _snake_case : Dict=True , _snake_case : Tuple=0.05 , _snake_case : str=10 , _snake_case : Tuple=2 , _snake_case : List[Any]=0.0 , _snake_case : str=10 , _snake_case : Any=0 , _snake_case : List[Any]=320 , _snake_case : Union[str, Any]=2 , _snake_case : Dict=0.1 , _snake_case : Dict=100 , _snake_case : Union[str, Any]=256 , _snake_case : int=256 , _snake_case : Union[str, Any]=0.1 , _snake_case : Optional[Any]="mean" , _snake_case : int=False , _snake_case : str=False , _snake_case : str=256 , _snake_case : List[Any]=(512, 512, 512, 512, 1500) , _snake_case : Optional[int]=(5, 3, 3, 1, 1) , _snake_case : Tuple=(1, 2, 3, 1, 1) , _snake_case : Dict=512 , _snake_case : Union[str, Any]=0 , _snake_case : List[str]=1 , _snake_case : Optional[Any]=2 , _snake_case : Optional[int]=504 , **_snake_case : Optional[int] , )->Union[str, Any]:
'''simple docstring'''
super().__init__(**_snake_case , pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case )
__lowerCAmelCase : Dict = hidden_size
__lowerCAmelCase : List[Any] = feat_extract_norm
__lowerCAmelCase : int = feat_extract_activation
__lowerCAmelCase : Union[str, Any] = list(_snake_case )
__lowerCAmelCase : str = list(_snake_case )
__lowerCAmelCase : Optional[Any] = list(_snake_case )
__lowerCAmelCase : Optional[int] = conv_bias
__lowerCAmelCase : Dict = num_conv_pos_embeddings
__lowerCAmelCase : List[Any] = num_conv_pos_embedding_groups
__lowerCAmelCase : Tuple = len(self.conv_dim )
__lowerCAmelCase : int = num_hidden_layers
__lowerCAmelCase : str = intermediate_size
__lowerCAmelCase : str = hidden_act
__lowerCAmelCase : Any = num_attention_heads
__lowerCAmelCase : Optional[int] = hidden_dropout
__lowerCAmelCase : str = attention_dropout
__lowerCAmelCase : int = activation_dropout
__lowerCAmelCase : Union[str, Any] = feat_proj_dropout
__lowerCAmelCase : List[str] = final_dropout
__lowerCAmelCase : Dict = layerdrop
__lowerCAmelCase : Tuple = layer_norm_eps
__lowerCAmelCase : Optional[Any] = initializer_range
__lowerCAmelCase : str = vocab_size
__lowerCAmelCase : Optional[int] = num_clusters
__lowerCAmelCase : List[Any] = do_stable_layer_norm
__lowerCAmelCase : Tuple = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCAmelCase : Dict = apply_spec_augment
__lowerCAmelCase : List[Any] = mask_time_prob
__lowerCAmelCase : List[str] = mask_time_length
__lowerCAmelCase : Dict = mask_time_min_masks
__lowerCAmelCase : Tuple = mask_feature_prob
__lowerCAmelCase : List[str] = mask_feature_length
__lowerCAmelCase : str = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__lowerCAmelCase : Optional[int] = num_codevectors_per_group
__lowerCAmelCase : List[Any] = num_codevector_groups
__lowerCAmelCase : int = contrastive_logits_temperature
__lowerCAmelCase : str = feat_quantizer_dropout
__lowerCAmelCase : int = num_negatives
__lowerCAmelCase : str = codevector_dim
__lowerCAmelCase : Any = proj_codevector_dim
__lowerCAmelCase : Any = diversity_loss_weight
# ctc loss
__lowerCAmelCase : Tuple = ctc_loss_reduction
__lowerCAmelCase : Any = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__lowerCAmelCase : Any = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__lowerCAmelCase : List[str] = list(_snake_case )
__lowerCAmelCase : List[str] = list(_snake_case )
__lowerCAmelCase : Optional[int] = list(_snake_case )
__lowerCAmelCase : Optional[int] = xvector_output_dim
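        # Example (sketch): with the default conv_stride (5, 2, 2, 2, 2, 2, 2),
        # the property below returns 5 * 2**6 == 320, i.e. the feature encoder
        # emits one frame per 320 waveform samples (~20 ms of 16 kHz audio).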
@property
def UpperCAmelCase__ ( self : Optional[Any] )->Any:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 ) | 232 | 1 |
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
__UpperCamelCase = logging.getLogger(__name__)
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
    # remove any stale config/weight files, then save the (pruned) model
if os.path.exists(UpperCAmelCase ):
if os.path.exists(os.path.join(UpperCAmelCase , 'config.json' ) ) and os.path.isfile(
os.path.join(UpperCAmelCase , 'config.json' ) ):
os.remove(os.path.join(UpperCAmelCase , 'config.json' ) )
if os.path.exists(os.path.join(UpperCAmelCase , 'pytorch_model.bin' ) ) and os.path.isfile(
os.path.join(UpperCAmelCase , 'pytorch_model.bin' ) ):
os.remove(os.path.join(UpperCAmelCase , 'pytorch_model.bin' ) )
else:
os.makedirs(UpperCAmelCase )
model.save_pretrained(UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=False ) -> Union[str, Any]:
snake_case_ = 2
if unlogit:
snake_case_ = torch.pow(UpperCAmelCase , UpperCAmelCase )
snake_case_ = p * torch.log(UpperCAmelCase )
snake_case_ = 0
return -plogp.sum(dim=-1 )
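# Quick check: for a uniform attention row p_i = 1/n, -sum(p * log p) == log(n),
# so higher entropy means a head spreads its attention more evenly
# (unlogit=True squares the weights before taking the log).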
def UpperCAmelCase ( UpperCAmelCase ) -> List[str]:
logger.info('lv, h >\t' + '\t'.join(f'{x + 1}' for x in range(len(UpperCAmelCase ) ) ) )
for row in range(len(UpperCAmelCase ) ):
if tensor.dtype != torch.long:
logger.info(f'layer {row + 1}:\t' + '\t'.join(f'{x:.5f}' for x in tensor[row].cpu().data ) )
else:
logger.info(f'layer {row + 1}:\t' + '\t'.join(f'{x:d}' for x in tensor[row].cpu().data ) )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=False ) -> Union[str, Any]:
snake_case_ , snake_case_ = model.config.num_hidden_layers, model.config.num_attention_heads
snake_case_ = torch.zeros(UpperCAmelCase , UpperCAmelCase ).to(args.device )
snake_case_ = torch.zeros(UpperCAmelCase , UpperCAmelCase ).to(args.device )
if head_mask is None:
snake_case_ = torch.ones(UpperCAmelCase , UpperCAmelCase ).to(args.device )
head_mask.requires_grad_(requires_grad=UpperCAmelCase )
    # If heads were actually pruned, set the head mask to None to avoid a shape mismatch
if actually_pruned:
snake_case_ = None
snake_case_ = 0.0
snake_case_ = 0.0
for step, inputs in enumerate(tqdm(UpperCAmelCase , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
snake_case_ = tuple(t.to(args.device ) for t in inputs )
        (snake_case_,) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
snake_case_ = model(UpperCAmelCase , labels=UpperCAmelCase , head_mask=UpperCAmelCase )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
snake_case_ , snake_case_ , snake_case_ = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(UpperCAmelCase ):
snake_case_ = entropy(attn.detach() , UpperCAmelCase )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(UpperCAmelCase ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
snake_case_ = 2
snake_case_ = torch.pow(torch.pow(UpperCAmelCase , UpperCAmelCase ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
snake_case_ = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('Attention entropies' )
print_ad_tensor(UpperCAmelCase )
if compute_importance:
logger.info('Head importance scores' )
print_ad_tensor(UpperCAmelCase )
logger.info('Head ranked by importance scores' )
snake_case_ = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
snake_case_ = torch.arange(
head_importance.numel() , device=args.device )
snake_case_ = head_ranks.view_as(UpperCAmelCase )
print_ad_tensor(UpperCAmelCase )
return attn_entropy, head_importance, total_loss
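# Note: head_importance is the accumulated |dL/d(head_mask)| over the dataset,
# the gradient-based proxy score from Michel et al., "Are Sixteen Heads Really
# Better than One?" (2019); heads with small scores should be safest to prune.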
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
snake_case_ , snake_case_ , snake_case_ = compute_heads_importance(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , compute_entropy=UpperCAmelCase )
snake_case_ = 1 / loss # instead of downsteam score use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , UpperCAmelCase , original_score * args.masking_threshold )
snake_case_ = torch.ones_like(UpperCAmelCase )
snake_case_ = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
snake_case_ = original_score
while current_score >= original_score * args.masking_threshold:
snake_case_ = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
snake_case_ = float('Inf' )
snake_case_ = head_importance.view(-1 ).sort()[1]
if len(UpperCAmelCase ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
snake_case_ = current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
snake_case_ = new_head_mask.view(-1 )
snake_case_ = 0.0
snake_case_ = new_head_mask.view_as(UpperCAmelCase )
snake_case_ = new_head_mask.clone().detach()
print_ad_tensor(UpperCAmelCase )
# Compute metric and head importance again
snake_case_ , snake_case_ , snake_case_ = compute_heads_importance(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , compute_entropy=UpperCAmelCase , head_mask=UpperCAmelCase )
snake_case_ = 1 / loss
logger.info(
'Masking: current score: %f, remaining heads %d (%.1f percents)' , UpperCAmelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info('Final head mask' )
print_ad_tensor(UpperCAmelCase )
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
snake_case_ = datetime.now()
snake_case_ , snake_case_ , snake_case_ = compute_heads_importance(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , compute_entropy=UpperCAmelCase , compute_importance=UpperCAmelCase , head_mask=UpperCAmelCase )
snake_case_ = 1 / loss
snake_case_ = datetime.now() - before_time
snake_case_ = sum(p.numel() for p in model.parameters() )
snake_case_ = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(UpperCAmelCase ) )
}
for k, v in heads_to_prune.items():
if isinstance(UpperCAmelCase , UpperCAmelCase ):
snake_case_ = [
v,
]
assert sum(len(UpperCAmelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(UpperCAmelCase )
snake_case_ = sum(p.numel() for p in model.parameters() )
snake_case_ = datetime.now()
snake_case_ , snake_case_ , snake_case_ = compute_heads_importance(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , compute_entropy=UpperCAmelCase , compute_importance=UpperCAmelCase , head_mask=UpperCAmelCase , actually_pruned=UpperCAmelCase , )
snake_case_ = 1 / loss
snake_case_ = datetime.now() - before_time
logger.info(
'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , UpperCAmelCase , UpperCAmelCase , pruned_num_params / original_num_params * 100 , )
logger.info('Pruning: score with masking: %f score with pruning: %f' , UpperCAmelCase , UpperCAmelCase )
logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 100 )
save_model(UpperCAmelCase , args.output_dir )
def UpperCAmelCase ( ) -> List[str]:
snake_case_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=UpperCAmelCase , type=UpperCAmelCase , required=UpperCAmelCase , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=UpperCAmelCase , type=UpperCAmelCase , required=UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=UpperCAmelCase , type=UpperCAmelCase , required=UpperCAmelCase , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=UpperCAmelCase , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=UpperCAmelCase , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=UpperCAmelCase , type=UpperCAmelCase , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=UpperCAmelCase , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
'--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
parser.add_argument(
'--masking_threshold' , default=0.9 , type=UpperCAmelCase , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
parser.add_argument(
'--masking_amount' , default=0.1 , type=UpperCAmelCase , help='Amount to heads to masking at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=UpperCAmelCase , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=128 , type=UpperCAmelCase , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=UpperCAmelCase , help='Batch size.' )
parser.add_argument('--seed' , type=UpperCAmelCase , default=42 )
parser.add_argument('--local_rank' , type=UpperCAmelCase , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
parser.add_argument('--server_ip' , type=UpperCAmelCase , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=UpperCAmelCase , default='' , help='Can be used for distant debugging.' )
snake_case_ = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=UpperCAmelCase )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
snake_case_ = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
snake_case_ = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
snake_case_ = torch.device('cuda' , args.local_rank )
snake_case_ = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
snake_case_ = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
snake_case_ = nn.parallel.DistributedDataParallel(
UpperCAmelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=UpperCAmelCase )
elif args.n_gpu > 1:
snake_case_ = nn.DataParallel(UpperCAmelCase )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=UpperCAmelCase )
torch.save(UpperCAmelCase , os.path.join(args.output_dir , 'run_args.bin' ) )
logger.info('Training/evaluation parameters %s' , UpperCAmelCase )
# Prepare dataset
snake_case_ = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
snake_case_ = (torch.from_numpy(UpperCAmelCase ),)
snake_case_ = TensorDataset(*UpperCAmelCase )
snake_case_ = RandomSampler(UpperCAmelCase )
snake_case_ = DataLoader(UpperCAmelCase , sampler=UpperCAmelCase , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
snake_case_ = mask_heads(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
prune_heads(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if __name__ == "__main__":
main()
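# Example invocation (sketch; the script name and paths are placeholders, the
# flags match the parser defined above):
#   python prune_gpt2.py --model_name_or_path gpt2 --data_dir data/ids.txt \
#       --output_dir out/ --try_masking --masking_threshold 0.9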
| 69 | """simple docstring"""
from __future__ import annotations
def generate_all_permutations(sequence: list) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list, current_sequence: list, index: int, index_used: list
) -> None:
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False
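# Example: for [3, 1, 2, 4] the driver below prints all 4! == 24 orderings,
# depth-first: [3, 1, 2, 4], then [3, 1, 4, 2], and so on.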
sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 69 | 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Tuple = (DEISMultistepScheduler,)
a : List[str] = (("num_inference_steps", 25),)
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> int:
'''simple docstring'''
__lowercase = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
}
config.update(**_lowerCamelCase )
return config
def _UpperCAmelCase (self ,_lowerCamelCase=0 ,**_lowerCamelCase ) -> Any:
'''simple docstring'''
__lowercase = dict(self.forward_default_kwargs )
__lowercase = kwargs.pop('''num_inference_steps''' ,_lowerCamelCase )
__lowercase = self.dummy_sample
__lowercase = 0.1 * sample
__lowercase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
__lowercase = self.get_scheduler_config(**_lowerCamelCase )
__lowercase = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(_lowerCamelCase )
# copy over dummy past residuals
__lowercase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowerCamelCase )
__lowercase = scheduler_class.from_pretrained(_lowerCamelCase )
new_scheduler.set_timesteps(_lowerCamelCase )
# copy over dummy past residuals
__lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
__lowercase , __lowercase = sample, sample
for t in range(_lowerCamelCase ,time_step + scheduler.config.solver_order + 1 ):
__lowercase = scheduler.step(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,**_lowerCamelCase ).prev_sample
__lowercase = new_scheduler.step(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,**_lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
pass
def _UpperCAmelCase (self ,_lowerCamelCase=0 ,**_lowerCamelCase ) -> str:
'''simple docstring'''
__lowercase = dict(self.forward_default_kwargs )
__lowercase = kwargs.pop('''num_inference_steps''' ,_lowerCamelCase )
__lowercase = self.dummy_sample
__lowercase = 0.1 * sample
__lowercase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(_lowerCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
__lowercase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowerCamelCase )
__lowercase = scheduler_class.from_pretrained(_lowerCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowerCamelCase )
# copy over dummy past residual (must be after setting timesteps)
__lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
__lowercase = scheduler.step(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,**_lowerCamelCase ).prev_sample
__lowercase = new_scheduler.step(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,**_lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _UpperCAmelCase (self ,_lowerCamelCase=None ,**_lowerCamelCase ) -> int:
'''simple docstring'''
if scheduler is None:
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(**_lowerCamelCase )
__lowercase = scheduler_class(**_lowerCamelCase )
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(**_lowerCamelCase )
__lowercase = scheduler_class(**_lowerCamelCase )
__lowercase = 10
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
scheduler.set_timesteps(_lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
__lowercase = model(_lowerCamelCase ,_lowerCamelCase )
__lowercase = scheduler.step(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ).prev_sample
return sample
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = dict(self.forward_default_kwargs )
__lowercase = kwargs.pop('''num_inference_steps''' ,_lowerCamelCase )
for scheduler_class in self.scheduler_classes:
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCamelCase )
__lowercase = self.dummy_sample
__lowercase = 0.1 * sample
if num_inference_steps is not None and hasattr(_lowerCamelCase ,'''set_timesteps''' ):
scheduler.set_timesteps(_lowerCamelCase )
elif num_inference_steps is not None and not hasattr(_lowerCamelCase ,'''set_timesteps''' ):
__lowercase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__lowercase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
__lowercase = dummy_past_residuals[: scheduler.config.solver_order]
__lowercase = scheduler.timesteps[5]
__lowercase = scheduler.timesteps[6]
__lowercase = scheduler.step(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,**_lowerCamelCase ).prev_sample
__lowercase = scheduler.step(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,**_lowerCamelCase ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = DEISMultistepScheduler(**self.get_scheduler_config() )
__lowercase = self.full_loop(scheduler=_lowerCamelCase )
__lowercase = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1E-3
__lowercase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
__lowercase = DPMSolverMultistepScheduler.from_config(scheduler.config )
__lowercase = UniPCMultistepScheduler.from_config(scheduler.config )
__lowercase = DEISMultistepScheduler.from_config(scheduler.config )
__lowercase = self.full_loop(scheduler=_lowerCamelCase )
__lowercase = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1E-3
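        # The from_config round trip above relies on DEIS, DPMSolver (single- and
        # multi-step) and UniPC sharing a compatible config schema, so one
        # scheduler can be swapped for another without changing the pipeline.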
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCamelCase )
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
self.check_over_configs(thresholding=_lowerCamelCase )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_lowerCamelCase ,prediction_type=_lowerCamelCase ,sample_max_value=_lowerCamelCase ,algorithm_type='''deis''' ,solver_order=_lowerCamelCase ,solver_type=_lowerCamelCase ,)
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCamelCase )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_lowerCamelCase ,solver_type=_lowerCamelCase ,prediction_type=_lowerCamelCase ,algorithm_type=_lowerCamelCase ,)
__lowercase = self.full_loop(
solver_order=_lowerCamelCase ,solver_type=_lowerCamelCase ,prediction_type=_lowerCamelCase ,algorithm_type=_lowerCamelCase ,)
assert not torch.isnan(_lowerCamelCase ).any(), "Samples have nan numbers"
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
self.check_over_configs(lower_order_final=_lowerCamelCase )
self.check_over_configs(lower_order_final=_lowerCamelCase )
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_lowerCamelCase ,time_step=0 )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = self.full_loop()
__lowercase = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1E-3
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = self.full_loop(prediction_type='''v_prediction''' )
__lowercase = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_mean.item() - 0.0_9_1 ) < 1E-3
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(thresholding=_lowerCamelCase ,dynamic_thresholding_ratio=0 )
__lowercase = scheduler_class(**_lowerCamelCase )
__lowercase = 10
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter.half()
scheduler.set_timesteps(_lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
__lowercase = model(_lowerCamelCase ,_lowerCamelCase )
__lowercase = scheduler.step(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ).prev_sample
assert sample.dtype == torch.floataa
| 217 |
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.array) -> np.array:
    # Sigmoid approximation of GELU: x * sigmoid(1.702 * x).
    return vector * sigmoid(1.702 * vector)
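# Example: gaussian_error_linear_unit(np.array([-1.0, 0.0, 1.0])) is roughly
# [-0.154, 0.0, 0.846]; the exact erf-based GELU gives [-0.159, 0.0, 0.841].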
if __name__ == "__main__":
import doctest
doctest.testmod()
| 217 | 1 |
def power(base: int, exponent: int) -> float:
    """Recursively compute base ** exponent for a non-negative exponent."""
    return base * power(base, exponent - 1) if exponent else 1
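# Example: power(2, 10) recurses ten times and returns 1024; exponent 0 hits
# the base case and returns 1.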
if __name__ == "__main__":
    print('Raise base to the power of exponent using recursion...')
    base = int(input('Enter the base: ').strip())
    exponent = int(input('Enter the exponent: ').strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
print(f"{base} to the power of {exponent} is {result}")
| 30 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCAmelCase_ ( __UpperCAmelCase: Optional[int] , __UpperCAmelCase: List[Any]=0.999 , __UpperCAmelCase: Tuple="cosine" , ) -> Optional[Any]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(__UpperCAmelCase: List[Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__UpperCAmelCase: List[Any] ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}" )
UpperCamelCase__ : Dict = []
for i in range(__UpperCAmelCase ):
UpperCamelCase__ : Optional[Any] = i / num_diffusion_timesteps
UpperCamelCase__ : int = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__UpperCAmelCase ) / alpha_bar_fn(__UpperCAmelCase ) , __UpperCAmelCase ) )
return torch.tensor(__UpperCAmelCase , dtype=torch.floataa )
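# Sketch: each beta_i is 1 - alpha_bar((i+1)/T) / alpha_bar(i/T), clipped at
# max_beta, so the cumulative product of (1 - beta_i) tracks the chosen
# alpha_bar curve (cosine by default).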
class lowercase__ ( __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
a : Optional[Any] = [e.name for e in KarrasDiffusionSchedulers]
a : Union[str, Any] = 2
@register_to_config
def __init__( self, __magic_name__ = 1000, __magic_name__ = 0.0_0085, __magic_name__ = 0.012, __magic_name__ = "linear", __magic_name__ = None, __magic_name__ = "epsilon", __magic_name__ = "linspace", __magic_name__ = 0, ) -> Tuple:
"""simple docstring"""
if trained_betas is not None:
UpperCamelCase__ : int = torch.tensor(__magic_name__, dtype=torch.floataa )
elif beta_schedule == "linear":
UpperCamelCase__ : Dict = torch.linspace(__magic_name__, __magic_name__, __magic_name__, dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
UpperCamelCase__ : List[str] = (
torch.linspace(beta_start**0.5, beta_end**0.5, __magic_name__, dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
UpperCamelCase__ : str = betas_for_alpha_bar(__magic_name__ )
else:
raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}" )
UpperCamelCase__ : Optional[int] = 1.0 - self.betas
UpperCamelCase__ : List[Any] = torch.cumprod(self.alphas, dim=0 )
# set all values
self.set_timesteps(__magic_name__, __magic_name__, __magic_name__ )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__=None ) -> str:
"""simple docstring"""
if schedule_timesteps is None:
UpperCamelCase__ : Dict = self.timesteps
UpperCamelCase__ : Tuple = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
UpperCamelCase__ : List[str] = 1 if len(__magic_name__ ) > 1 else 0
else:
UpperCamelCase__ : List[Any] = timestep.cpu().item() if torch.is_tensor(__magic_name__ ) else timestep
UpperCamelCase__ : int = self._index_counter[timestep_int]
return indices[pos].item()
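    # Note: after the sigma interpolation in set_timesteps, each timestep value
    # can occur twice in self.timesteps, so the lookup above takes the second
    # match on the very first step and uses _index_counter to disambiguate later.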
@property
def UpperCamelCase__ ( self ) -> List[str]:
"""simple docstring"""
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, ) -> torch.FloatTensor:
"""simple docstring"""
UpperCamelCase__ : Tuple = self.index_for_timestep(__magic_name__ )
if self.state_in_first_order:
UpperCamelCase__ : str = self.sigmas[step_index]
else:
UpperCamelCase__ : Optional[int] = self.sigmas_interpol[step_index]
UpperCamelCase__ : Optional[int] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def UpperCamelCase__ ( self, __magic_name__, __magic_name__ = None, __magic_name__ = None, ) -> str:
"""simple docstring"""
UpperCamelCase__ : Dict = num_inference_steps
UpperCamelCase__ : Tuple = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
UpperCamelCase__ : Union[str, Any] = np.linspace(0, num_train_timesteps - 1, __magic_name__, dtype=__magic_name__ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
UpperCamelCase__ : Union[str, Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCamelCase__ : List[str] = (np.arange(0, __magic_name__ ) * step_ratio).round()[::-1].copy().astype(__magic_name__ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
UpperCamelCase__ : Optional[Any] = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCamelCase__ : List[str] = (np.arange(__magic_name__, 0, -step_ratio )).round().copy().astype(__magic_name__ )
timesteps -= 1
else:
raise ValueError(
f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." )
UpperCamelCase__ : int = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
UpperCamelCase__ : Optional[Any] = torch.from_numpy(np.log(__magic_name__ ) ).to(__magic_name__ )
UpperCamelCase__ : Any = np.interp(__magic_name__, np.arange(0, len(__magic_name__ ) ), __magic_name__ )
UpperCamelCase__ : Union[str, Any] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
UpperCamelCase__ : Any = torch.from_numpy(__magic_name__ ).to(device=__magic_name__ )
# interpolate sigmas
UpperCamelCase__ : int = sigmas.log().lerp(sigmas.roll(1 ).log(), 0.5 ).exp()
UpperCamelCase__ : List[str] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
UpperCamelCase__ : str = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(__magic_name__ ).startswith('''mps''' ):
# mps does not support float64
UpperCamelCase__ : Optional[Any] = torch.from_numpy(__magic_name__ ).to(__magic_name__, dtype=torch.floataa )
else:
UpperCamelCase__ : List[Any] = torch.from_numpy(__magic_name__ ).to(__magic_name__ )
# interpolate timesteps
UpperCamelCase__ : str = self.sigma_to_t(__magic_name__ ).to(__magic_name__, dtype=timesteps.dtype )
UpperCamelCase__ : Dict = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1 ).flatten()
UpperCamelCase__ : Optional[int] = torch.cat([timesteps[:1], interleaved_timesteps] )
UpperCamelCase__ : List[str] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
UpperCamelCase__ : Dict = defaultdict(__magic_name__ )
def UpperCamelCase__ ( self, __magic_name__ ) -> Optional[Any]:
"""simple docstring"""
# get log sigma
UpperCamelCase__ : Any = sigma.log()
# get distribution
UpperCamelCase__ : List[str] = log_sigma - self.log_sigmas[:, None]
# get sigmas range
UpperCamelCase__ : int = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
UpperCamelCase__ : Optional[Any] = low_idx + 1
UpperCamelCase__ : str = self.log_sigmas[low_idx]
UpperCamelCase__ : int = self.log_sigmas[high_idx]
# interpolate sigmas
UpperCamelCase__ : List[Any] = (low - log_sigma) / (low - high)
UpperCamelCase__ : str = w.clamp(0, 1 )
# transform interpolation to time range
UpperCamelCase__ : Tuple = (1 - w) * low_idx + w * high_idx
UpperCamelCase__ : int = t.view(sigma.shape )
return t
@property
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
return self.sample is None
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__, __magic_name__ = True, ) -> Union[SchedulerOutput, Tuple]:
"""simple docstring"""
UpperCamelCase__ : List[str] = self.index_for_timestep(__magic_name__ )
# advance index counter by 1
UpperCamelCase__ : int = timestep.cpu().item() if torch.is_tensor(__magic_name__ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
UpperCamelCase__ : Optional[Any] = self.sigmas[step_index]
UpperCamelCase__ : Union[str, Any] = self.sigmas_interpol[step_index + 1]
UpperCamelCase__ : List[Any] = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
UpperCamelCase__ : Tuple = self.sigmas[step_index - 1]
UpperCamelCase__ : Tuple = self.sigmas_interpol[step_index]
UpperCamelCase__ : Dict = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
UpperCamelCase__ : Optional[int] = 0
UpperCamelCase__ : List[Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
UpperCamelCase__ : Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_interpol
UpperCamelCase__ : List[str] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
UpperCamelCase__ : List[Any] = sigma_hat if self.state_in_first_order else sigma_interpol
UpperCamelCase__ : Optional[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('''prediction_type not implemented yet: sample''' )
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
UpperCamelCase__ : List[Any] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
UpperCamelCase__ : List[str] = sigma_interpol - sigma_hat
# store for 2nd order step
UpperCamelCase__ : Dict = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
UpperCamelCase__ : List[str] = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
UpperCamelCase__ : Union[str, Any] = sigma_next - sigma_hat
UpperCamelCase__ : Union[str, Any] = self.sample
UpperCamelCase__ : Dict = None
UpperCamelCase__ : Optional[int] = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__magic_name__ )
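    # The two branches above implement a DPM-Solver-2 style predictor-corrector:
    # a first-order step to the interpolated sigma (storing `sample`), then a
    # second-order step from that stored state to the next sigma.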
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__, ) -> torch.FloatTensor:
"""simple docstring"""
# Make sure sigmas and timesteps have the same device and dtype as original_samples
UpperCamelCase__ : List[str] = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__magic_name__ ):
# mps does not support float64
UpperCamelCase__ : Union[str, Any] = self.timesteps.to(original_samples.device, dtype=torch.floataa )
UpperCamelCase__ : Tuple = timesteps.to(original_samples.device, dtype=torch.floataa )
else:
UpperCamelCase__ : str = self.timesteps.to(original_samples.device )
UpperCamelCase__ : int = timesteps.to(original_samples.device )
UpperCamelCase__ : Any = [self.index_for_timestep(__magic_name__, __magic_name__ ) for t in timesteps]
UpperCamelCase__ : List[str] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
UpperCamelCase__ : int = sigma.unsqueeze(-1 )
UpperCamelCase__ : List[str] = original_samples + noise * sigma
return noisy_samples
def __len__( self ) -> Any:
"""simple docstring"""
return self.config.num_train_timesteps
| 201 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _snake_case ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase):
UpperCamelCase__ : List[Any] =StableDiffusionXLImgaImgPipeline
UpperCamelCase__ : int =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
UpperCamelCase__ : Tuple =PipelineTesterMixin.required_optional_params - {"""latents"""}
UpperCamelCase__ : List[str] =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCamelCase__ : Union[str, Any] =IMAGE_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase__ : Any =IMAGE_TO_IMAGE_IMAGE_PARAMS
def A__ ( self : Union[str, Any] ):
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), attention_head_dim=(2, 4), use_linear_projection=__lowercase, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, )
lowercase__ = EulerDiscreteScheduler(
beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", )
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
torch.manual_seed(0 )
lowercase__ = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-0_5, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32, )
lowercase__ = CLIPTextModel(__lowercase )
lowercase__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=__lowercase )
lowercase__ = CLIPTextModelWithProjection(__lowercase )
lowercase__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=__lowercase )
lowercase__ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_encoder_2": text_encoder_a,
"tokenizer_2": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def A__ ( self : Union[str, Any], __lowercase : List[str], __lowercase : str=0 ):
lowercase__ = floats_tensor((1, 3, 32, 32), rng=random.Random(__lowercase ) ).to(__lowercase )
lowercase__ = image / 2 + 0.5
if str(__lowercase ).startswith("mps" ):
lowercase__ = torch.manual_seed(__lowercase )
else:
lowercase__ = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
lowercase__ = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"output_type": "numpy",
"strength": 0.75,
}
return inputs
def A__ ( self : Optional[Any] ):
lowercase__ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionXLImgaImgPipeline(**__lowercase )
lowercase__ = sd_pipe.to(__lowercase )
sd_pipe.set_progress_bar_config(disable=__lowercase )
lowercase__ = self.get_dummy_inputs(__lowercase )
lowercase__ = sd_pipe(**__lowercase ).images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase__ = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self : str ):
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def A__ ( self : List[Any] ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def A__ ( self : List[str] ):
pass
def A__ ( self : List[Any] ):
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionXLImgaImgPipeline(**__lowercase )
lowercase__ = sd_pipe.to(__lowercase )
lowercase__ = sd_pipe.to(__lowercase )
sd_pipe.set_progress_bar_config(disable=__lowercase )
# forward without prompt embeds
lowercase__ = self.get_dummy_inputs(__lowercase )
lowercase__ = 3 * ["this is a negative prompt"]
lowercase__ = negative_prompt
lowercase__ = 3 * [inputs["prompt"]]
lowercase__ = sd_pipe(**__lowercase )
lowercase__ = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
lowercase__ = self.get_dummy_inputs(__lowercase )
lowercase__ = 3 * ["this is a negative prompt"]
lowercase__ = 3 * [inputs.pop("prompt" )]
        lowercase__ , lowercase__ , lowercase__ , lowercase__ = sd_pipe.encode_prompt(__lowercase, negative_prompt=__lowercase )
lowercase__ = sd_pipe(
**__lowercase, prompt_embeds=__lowercase, negative_prompt_embeds=__lowercase, pooled_prompt_embeds=__lowercase, negative_pooled_prompt_embeds=__lowercase, )
lowercase__ = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
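        # Passing raw prompt strings and passing precomputed embeddings must be
        # numerically equivalent: encode_prompt is exactly the encoding step the
        # pipeline would otherwise run internally.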
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase):
def A__ ( self : str ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self : str, __lowercase : Optional[Any], __lowercase : str="cpu", __lowercase : List[str]=torch.floataa, __lowercase : List[Any]=0 ):
lowercase__ = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
lowercase__ = np.random.RandomState(__lowercase ).standard_normal((1, 4, 64, 64) )
lowercase__ = torch.from_numpy(__lowercase ).to(device=__lowercase, dtype=__lowercase )
lowercase__ = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def A__ ( self : Any ):
lowercase__ = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base" )
pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
lowercase__ = self.get_inputs(__lowercase )
lowercase__ = pipe(**__lowercase ).images
lowercase__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 366 |
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first `n` lines of every file in `src_dir` into `dest_dir`."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
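# Example usage via fire (paths are placeholders):
#   python minify.py data/raw data/tiny 100
# keeps only the first 100 lines of every file under data/raw.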
if __name__ == "__main__":
fire.Fire(minify)
| 224 | 0 |
'''simple docstring'''
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int('1' in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
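# Example: binary_or(25, 32) -> "0b111001", since 11001 | 100000 == 111001.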
if __name__ == "__main__":
import doctest
doctest.testmod()
| 161 |
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
a__ : Tuple = logging.get_logger(__name__)
# General docstring
a__ : List[Any] = "RegNetConfig"
# Base docstring
a__ : Dict = "facebook/regnet-y-040"
a__ : Optional[int] = [1, 1_0_8_8, 7, 7]
# Image classification docstring
a__ : Union[str, Any] = "facebook/regnet-y-040"
a__ : Union[str, Any] = "tabby, tabby cat"
a__ : int = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class UpperCamelCase__ ( nn.Module):
def __init__( self :Union[str, Any] , _A :int , _A :int , _A :int = 3 , _A :int = 1 , _A :int = 1 , _A :Optional[str] = "relu" , ) -> int:
'''simple docstring'''
super().__init__()
__A = nn.Convad(
_A , _A , kernel_size=_A , stride=_A , padding=kernel_size // 2 , groups=_A , bias=_A , )
__A = nn.BatchNormad(_A )
__A = ACTaFN[activation] if activation is not None else nn.Identity()
def lowercase_ ( self :Tuple , _A :Union[str, Any] ) -> int:
'''simple docstring'''
__A = self.convolution(_A )
__A = self.normalization(_A )
__A = self.activation(_A )
return hidden_state
class UpperCamelCase__ ( nn.Module):
def __init__( self :Optional[int] , _A :RegNetConfig ) -> List[str]:
'''simple docstring'''
super().__init__()
__A = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
__A = config.num_channels
def lowercase_ ( self :Any , _A :Optional[int] ) -> Optional[int]:
'''simple docstring'''
__A = pixel_values.shape[1]
if num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values matches the one set in the configuration.' )
__A = self.embedder(_A )
return hidden_state
class UpperCamelCase__ ( nn.Module):
def __init__( self :Optional[int] , _A :int , _A :int , _A :int = 2 ) -> Any:
'''simple docstring'''
super().__init__()
__A = nn.Convad(_A , _A , kernel_size=1 , stride=_A , bias=_A )
__A = nn.BatchNormad(_A )
def lowercase_ ( self :Optional[int] , _A :Tensor ) -> Tensor:
'''simple docstring'''
__A = self.convolution(_A )
__A = self.normalization(_A )
return hidden_state
class RegNetSELayer(nn.Module):
    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = R"\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
REGNET_INPUTS_DOCSTRING = R"\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConvNextImageProcessor.__call__`] for details.\n\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
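# Minimal inference sketch for the classes above, reusing the checkpoint name
# already referenced in this file (any RegNet checkpoint on the Hub works):
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=image, return_tensors="pt")  # `image` is any PIL image
#   predicted_label = model(**inputs).logits.argmax(-1).item()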
| 161 | 1 |
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm1.weight""", f"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm1.bias""", f"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.weight""", f"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.bias""", f"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm2.weight""", f"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm2.bias""", f"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.weight""", f"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.bias""", f"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc2.weight""", f"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.mlp.fc2.bias""", f"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('encoder.deit.cls_token', 'encoder.embeddings.cls_token'),
('encoder.deit.pos_embed', 'encoder.embeddings.position_embeddings'),
('encoder.deit.patch_embed.proj.weight', 'encoder.embeddings.patch_embeddings.projection.weight'),
('encoder.deit.patch_embed.proj.bias', 'encoder.embeddings.patch_embeddings.projection.bias'),
('encoder.deit.norm.weight', 'encoder.layernorm.weight'),
('encoder.deit.norm.bias', 'encoder.layernorm.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"""encoder.deit.blocks.{i}.attn.qkv.weight""")

        state_dict[f"""encoder.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"""encoder.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"""encoder.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
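# Tiny illustration of `rename_key` on a plain dict (values are made up):
#
#   d = {"encoder.deit.norm.weight": 1}
#   rename_key(d, "encoder.deit.norm.weight", "encoder.layernorm.weight")
#   # d == {"encoder.layernorm.weight": 1}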
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving processor to {pytorch_dump_folder_path}""")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
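# Example invocation (the script filename and output directory are illustrative):
#
#   python convert_trocr_unilm_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten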
| 208 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
lowerCamelCase : Any = r"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
RAG_CONFIG_DOC = lowerCamelCase  # alias for the config docstring defined above


@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True
    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
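# Sketch of building a composite RAG config from two sub-configs (the model
# identifiers below are illustrative; any question-encoder/generator pair works):
#
#   from transformers import AutoConfig
#   question_encoder = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   generator = AutoConfig.from_pretrained("facebook/bart-large")
#   rag_config = RagConfig.from_question_encoder_generator_configs(question_encoder, generator, n_docs=5)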
| 208 | 1 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
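# Sketch of how `align_with_features` is meant to be used (feature values are
# illustrative):
#
#   features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
#   task = AudioClassification().align_with_features(features)
#   # task.label_schema["labels"] now carries the concrete ClassLabel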
| 70 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device("cpu")
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])

    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])

    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])

    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]

    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]

    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]

    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
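# Example invocation (the checkpoint URL is illustrative; pass any official
# SwiftFormer checkpoint via --original_ckpt):
#
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs \
#       --original_ckpt https://example.com/swiftformer_xs.pth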
| 70 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_blenderbot_small''': [
'''BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotSmallConfig''',
'''BlenderbotSmallOnnxConfig''',
],
'''tokenization_blenderbot_small''': ['''BlenderbotSmallTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
'''BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotSmallForCausalLM''',
'''BlenderbotSmallForConditionalGeneration''',
'''BlenderbotSmallModel''',
'''BlenderbotSmallPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
'''TFBlenderbotSmallForConditionalGeneration''',
'''TFBlenderbotSmallModel''',
'''TFBlenderbotSmallPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
'''FlaxBlenderbotSmallForConditionalGeneration''',
'''FlaxBlenderbotSmallModel''',
'''FlaxBlenderbotSmallPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
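    # Note: thanks to `_LazyModule`, a statement such as
    #   from transformers.models.blenderbot_small import BlenderbotSmallTokenizer
    # only imports the heavy tokenization/modeling modules on first attribute access.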
| 357 |
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: int = 32,
        crop_n_points_downscale_factor: int = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 244 | 0 |
'''simple docstring'''
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)

    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
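# Example invocation (the data path is hypothetical; the script expects a folder
# of `<label>_<id>.jpg` images such as the Oxford-IIIT Pet dataset):
#
#   accelerate launch cv_example.py --data_dir ./images --checkpointing_steps epoch --output_dir ./ckpts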
| 58 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
def lowerCAmelCase_ ( self : Optional[int] ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 289 | 0 |
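The three `test_call_*` methods above assert one contract: whatever the input type (PIL, NumPy, PyTorch), the processor returns `pixel_values` shaped `(batch, num_channels, crop_height, crop_width)`. A minimal sketch of that resize-then-center-crop pipeline, using only PIL and NumPy; the helper names here are illustrative, not part of the test suite:

import numpy as np
from PIL import Image


def resize_shortest_edge(image: Image.Image, shortest_edge: int) -> Image.Image:
    # Scale so the smaller side equals `shortest_edge`, keeping the aspect ratio.
    w, h = image.size
    scale = shortest_edge / min(w, h)
    return image.resize((round(w * scale), round(h * scale)))


def center_crop(image: Image.Image, height: int, width: int) -> Image.Image:
    w, h = image.size
    left = (w - width) // 2
    top = (h - height) // 2
    return image.crop((left, top, left + width, top + height))


image = Image.new("RGB", (32, 48))
out = center_crop(resize_shortest_edge(image, 20), 18, 18)
pixels = np.asarray(out).transpose(2, 0, 1)[None]  # HWC -> (1, C, H, W)
assert pixels.shape == (1, 3, 18, 18)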
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
'''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
'''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
'''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
'''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
'''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
'''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
'''mask_downscaling.0''': '''mask_embed.conv1''',
'''mask_downscaling.1''': '''mask_embed.layer_norm1''',
'''mask_downscaling.3''': '''mask_embed.conv2''',
'''mask_downscaling.4''': '''mask_embed.layer_norm2''',
'''mask_downscaling.6''': '''mask_embed.conv3''',
'''point_embeddings''': '''point_embed''',
'''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
'''image_encoder''': '''vision_encoder''',
'''neck.0''': '''neck.conv1''',
'''neck.1''': '''neck.layer_norm1''',
'''neck.2''': '''neck.conv2''',
'''neck.3''': '''neck.layer_norm2''',
'''patch_embed.proj''': '''patch_embed.projection''',
'''.norm''': '''.layer_norm''',
'''blocks''': '''layers''',
}
def replace_keys(state_dict):
    """Rename original SAM checkpoint keys to the Transformers naming scheme."""
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)
    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23])
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31])
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692

    if pytorch_dump_folder is not None:
        processor.save_pretrained(pytorch_dump_folder)
        hf_model.save_pretrained(pytorch_dump_folder)

    if push_to_hub:
        # Target repo name is illustrative; adjust to your own namespace.
        processor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
'''--model_name''',
default='''sam_vit_h_4b8939''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
parser.add_argument(
'''--model_hub_id''',
default='''ybelkada/segment-anything''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
    args = parser.parse_args()

    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 353 |
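A usage sketch for the conversion script above. The file name `convert_sam_to_hf.py` is an assumption (any path holding this script works), and a CUDA device is required since the script moves the model to `"cuda"`:

#   python convert_sam_to_hf.py \
#       --model_name sam_vit_b_01ec64 \
#       --pytorch_dump_folder_path ./sam-vit-base

# After conversion, the dumped checkpoint loads with the standard API:
from transformers import SamModel, SamProcessor

model = SamModel.from_pretrained("./sam-vit-base")
processor = SamProcessor.from_pretrained("./sam-vit-base")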
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''adapter_layer''': '''encoder.layers.*.adapter_layer''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
'''pooling_layer.linear''': '''projector''',
'''pooling_layer.projection''': '''classifier''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''projector''',
'''classifier''',
]
def read_txt_into_dict(filename):
    """Read one label per line into a {line_number: label} mapping."""
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
'''W_a''': '''linear_1.weight''',
'''W_b''': '''linear_2.weight''',
'''b_a''': '''linear_1.bias''',
'''b_b''': '''linear_2.bias''',
'''ln_W''': '''norm.weight''',
'''ln_b''': '''norm.bias''',
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group")
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False):
    """Copy/paste/tweak a fairseq wav2vec 2.0 checkpoint into the Transformers design."""
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False)
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask)
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
parser.add_argument(
'''--is_seq_class''',
action='''store_true''',
help='''Whether the model to convert is a fine-tuned sequence classification model or not''',
)
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
    )
| 275 | 0 |
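The three conversion modes above are selected from the CLI flags defined in the `__main__` block. A sketch of each invocation; the script file name and checkpoint paths are illustrative:

#   # fine-tuned CTC model (the default: neither flag set)
#   python convert_wav2vec2.py --checkpoint_path ctc.pt --dict_path dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-ctc
#
#   # pretrained-only model
#   python convert_wav2vec2.py --checkpoint_path pretrained.pt --not_finetuned \
#       --pytorch_dump_folder_path ./wav2vec2-base
#
#   # sequence-classification head
#   python convert_wav2vec2.py --checkpoint_path cls.pt --dict_path labels.txt \
#       --is_seq_class --pytorch_dump_folder_path ./wav2vec2-cls

from transformers import Wav2Vec2ForCTC

model = Wav2Vec2ForCTC.from_pretrained("./wav2vec2-ctc")  # loads the converted CTC weights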
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """Expected number of distinct colours among `num_picked` balls drawn from the urn (Project Euler 493)."""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
| 19 |
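The formula above is linearity of expectation: each of the 7 colours is missing from a 20-ball draw with probability C(60, 20) / C(70, 20), so the expected number of distinct colours is 7 * (1 - C(60, 20) / C(70, 20)). A direct check:

import math

p_missing = math.comb(60, 20) / math.comb(70, 20)  # one fixed colour entirely absent
expected = 7 * (1 - p_missing)
print(f"{expected:.9f}")  # same value as solution(20)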
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 139 | 0 |
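The toy vocabulary shows the CTRL BPE convention: a trailing `@@` marks a piece that continues the current word. Joining tokens with spaces and deleting `"@@ "` therefore reverses the split, which is why `get_input_output_texts` expects a lossless round trip:

tokens = ["adapt", "re@@", "a@@", "c@@", "t"]
detokenized = " ".join(tokens).replace("@@ ", "")
assert detokenized == "adapt react"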
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
def apply_tesseract(image, lang, tesseract_config):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_value: float = 1 / 255, do_normalize: bool = True, image_mean: Union[float, Iterable[float]] = None, image_std: Union[float, Iterable[float]] = None, apply_ocr: bool = True, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = "", **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample=None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean=None, image_std=None, apply_ocr: bool = None, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 366 |
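A minimal usage sketch for the processor above. The input path is illustrative, and the OCR branch additionally needs a working Tesseract install behind pytesseract:

from PIL import Image

processor = LayoutLMv3ImageProcessor()  # apply_ocr=True by default

image = Image.open("document.png").convert("RGB")
encoding = processor(image, return_tensors="pt")
print(encoding["pixel_values"].shape)   # (1, 3, 224, 224) with the default size
print(encoding["words"], encoding["boxes"])  # OCR words + boxes normalized to 0-1000

# Skip OCR when word/box annotations already exist:
processor_no_ocr = LayoutLMv3ImageProcessor(apply_ocr=False)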
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=50400, n_positions=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
| 160 | 0 |
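A quick sketch of how the two classes above fit together. The tiny hyperparameters are illustrative, and the classes are assumed importable from this module:

config = GPTJConfig(n_layer=2, n_head=4, n_embd=64, rotary_dim=16)  # tiny demo config
onnx_config = GPTJOnnxConfig(config, task="default", use_past=False)

print(onnx_config.inputs)              # OrderedDict of dynamic-axis specs per input
print(onnx_config.num_layers)          # 2, read from config.n_layer
print(onnx_config.default_onnx_opset)  # 13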
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_conditional_detr''': [
'''CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ConditionalDetrConfig''',
'''ConditionalDetrOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
'''CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConditionalDetrForObjectDetection''',
'''ConditionalDetrForSegmentation''',
'''ConditionalDetrModel''',
'''ConditionalDetrPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 314 |
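The file above is the standard transformers lazy-import pattern: submodules with heavy dependencies are imported only when one of their attributes is first accessed. A stripped-down illustration of the mechanism behind `_LazyModule` (a sketch, not the actual implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported name to the submodule that defines it.
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, name):
        # Import the defining submodule lazily, on first attribute access.
        module = importlib.import_module("." + self._class_to_module[name], self.__name__)
        return getattr(module, name)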
def solution(limit: int = 1_000_000) -> int:
    """Number of reduced proper fractions with denominator <= limit, i.e. the sum of phi(d) for 2 <= d <= limit."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
| 314 | 1 |
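A worked check against the implementation above on a small limit: for denominators d <= 8 the reduced proper fractions number phi(2)+phi(3)+...+phi(8) = 1+2+2+4+2+6+4 = 21, matching the Farey-sequence count in the Project Euler 72 statement:

assert solution(8) == 21  # the phi sieve reproduces the hand count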
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID  # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()


class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)


class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)


class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]):  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(cls, input_path: Union[Path, str], output_path: Union[Path, str], extractor_format: Optional[str] = None, extractor: Optional[BaseExtractor] = "deprecated"):
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
| 122 |
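A minimal usage sketch for the extraction utilities above (paths are illustrative):

manager = ExtractManager(cache_dir="/tmp/datasets_cache")
extracted_path = manager.extract("/tmp/archive.tar.gz")  # returns the input unchanged for non-archives

# Or drive the registry directly:
fmt = Extractor.infer_extractor_format("/tmp/archive.tar.gz")  # e.g. "gzip"
if fmt:
    Extractor.extract("/tmp/archive.tar.gz", "/tmp/extracted", extractor_format=fmt)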
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]

    def __init__(self, feature_size=64, sampling_rate=48_000, hop_length=480, max_length_s=10, fft_window_size=1024, padding_value=0.0, return_attention_mask=False, frequency_min: float = 0, frequency_max: float = 14_000, top_db: int = None, truncation: str = "fusion", padding: str = "repeatpad", **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs)
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale="htk")
        self.mel_filters_slaney = mel_filter_bank(num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney")
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(waveform, window_function(self.fft_window_size, "hann"), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel="dB")
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(mel_shrink, size=[chunk_frames, 64], mode="bilinear", align_corners=False)
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding):
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer
    def __call__(self, raw_speech, truncation: str = None, padding: Optional[str] = None, max_length: Optional[int] = None, sampling_rate: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
| 122 | 1 |
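# A minimal standalone sketch of the "repeat"/"repeatpad" idea above: tile a short
# waveform and zero-pad the remainder up to max_length. Names here are illustrative,
# not taken from the feature extractor.
import numpy as np

def repeat_and_pad(waveform: np.ndarray, max_length: int) -> np.ndarray:
    n_repeat = int(max_length / len(waveform))  # whole repetitions that fit
    tiled = np.tile(waveform, n_repeat)
    return np.pad(tiled, (0, max_length - tiled.shape[0]), mode="constant", constant_values=0)

print(repeat_and_pad(np.ones(3, dtype=np.float32), 8))  # [1. 1. 1. 1. 1. 1. 0. 0.]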
def rank_of_matrix(matrix: list[list[int | float]]) -> int:
    '''simple docstring'''
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # NOTE: decrementing the loop variable is meant to stay on the same
            # row, but it has no effect in a Python for-loop; the next iteration
            # simply reassigns `row`
            row -= 1
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod() | 232 |
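# A quick sanity check for rank_of_matrix above: the second row is a multiple of the
# first, so the rank drops to 2.
print(rank_of_matrix([[1.0, 2.0, 3.0], [2.0, 4.0, 6.0], [1.0, 0.0, 1.0]]))  # 2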
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase : List[str] = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[Any] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 232 | 1 |
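# The block above is the standard transformers lazy-import shim: heavy submodules are
# only imported when an attribute is first accessed. A generic sketch of the same
# deferred-import idea (class name and module paths here are placeholders, not the
# real _LazyModule, which also handles TYPE_CHECKING and module registration):
import importlib

class LazyAttrModule:
    def __init__(self, name_to_module: dict):
        self._name_to_module = name_to_module

    def __getattr__(self, name):
        # Import the backing module only on first attribute access.
        module = importlib.import_module(self._name_to_module[name])
        return getattr(module, name)

# e.g. LazyAttrModule({"ViTMSNModel": "transformers.models.vit_msn.modeling_vit_msn"})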
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def _UpperCAmelCase ( _UpperCamelCase : Tuple, _UpperCamelCase : Tuple, _UpperCamelCase : List[str] ) -> int:
A_ = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
A_ = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
A_ = F'''{src_lang}-{tgt_lang}'''
A_ = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
os.makedirs(_UpperCamelCase, exist_ok=_UpperCamelCase )
A_ = os.path.join(_UpperCamelCase, '''README.md''' )
print(F'''Generating {path}''' )
with open(_UpperCamelCase, '''w''', encoding='''utf-8''' ) as f:
f.write(_UpperCamelCase )
# make sure we are under the root of the project
__snake_case : Any = Path(__file__).resolve().parent.parent.parent
__snake_case : Tuple = repo_dir / 'model_cards'
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__snake_case , __snake_case , __snake_case : Any = model_name.split('-')
__snake_case : int = model_cards_dir / 'facebook' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 18 | '''simple docstring'''
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    if discount_rate < 0:
        raise ValueError('''Discount rate cannot be negative''')
    if not cash_flows:
        raise ValueError('''Cash flows list cannot be empty''')
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows))
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 1 |
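# A worked check of the discounting in present_value above: the flow at period 0 is
# undiscounted and each later flow is divided by (1 + rate) ** i.
# -1000 + 300/1.1 + 400/1.1**2 + 500/1.1**3 ≈ -1000 + 272.73 + 330.58 + 375.66
print(present_value(0.1, [-1000, 300, 400, 500]))  # -21.04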
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class snake_case :
def __init__( self : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Tuple=1_0 , UpperCamelCase__ : Tuple=3 , UpperCamelCase__ : Tuple=3_2 * 4 , UpperCamelCase__ : Union[str, Any]=3_2 * 6 , UpperCamelCase__ : Optional[int]=4 , UpperCamelCase__ : List[str]=3_2 , )-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Any = parent
__lowerCAmelCase: Dict = batch_size
__lowerCAmelCase: Optional[Any] = is_training
__lowerCAmelCase: List[str] = use_auxiliary_loss
__lowerCAmelCase: Dict = num_queries
__lowerCAmelCase: int = num_channels
__lowerCAmelCase: Dict = min_size
__lowerCAmelCase: Dict = max_size
__lowerCAmelCase: str = num_labels
__lowerCAmelCase: Any = mask_feature_size
def lowercase_ ( self : str)-> List[Any]:
'''simple docstring'''
__lowerCAmelCase: List[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
UpperCamelCase__)
__lowerCAmelCase: Any = torch.ones([self.batch_size, self.min_size, self.max_size] , device=UpperCamelCase__)
__lowerCAmelCase: List[Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=UpperCamelCase__) > 0.5
).float()
__lowerCAmelCase: List[Any] = (torch.rand((self.batch_size, self.num_labels) , device=UpperCamelCase__) > 0.5).long()
__lowerCAmelCase: int = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase_ ( self : Tuple)-> Union[str, Any]:
'''simple docstring'''
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def lowercase_ ( self : str)-> Tuple:
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: Optional[int] = self.prepare_config_and_inputs()
__lowerCAmelCase: Union[str, Any] = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def lowercase_ ( self : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str)-> str:
'''simple docstring'''
__lowerCAmelCase: List[Any] = output.encoder_hidden_states
__lowerCAmelCase: Tuple = output.pixel_decoder_hidden_states
__lowerCAmelCase: Tuple = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(UpperCamelCase__) , len(config.backbone_config.depths))
self.parent.assertTrue(len(UpperCamelCase__) , len(config.backbone_config.depths))
self.parent.assertTrue(len(UpperCamelCase__) , config.decoder_config.decoder_layers)
def lowercase_ ( self : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Any=False)-> str:
'''simple docstring'''
with torch.no_grad():
__lowerCAmelCase: Optional[int] = MaskFormerModel(config=UpperCamelCase__)
model.to(UpperCamelCase__)
model.eval()
__lowerCAmelCase: Dict = model(pixel_values=UpperCamelCase__ , pixel_mask=UpperCamelCase__)
__lowerCAmelCase: Union[str, Any] = model(UpperCamelCase__ , output_hidden_states=UpperCamelCase__)
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(output.encoder_last_hidden_state is not None)
if output_hidden_states:
self.check_output_hidden_state(UpperCamelCase__ , UpperCamelCase__)
def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int)-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: Dict = MaskFormerForInstanceSegmentation(config=UpperCamelCase__)
model.to(UpperCamelCase__)
model.eval()
def comm_check_on_output(UpperCamelCase__ : List[str]):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.encoder_last_hidden_state is not None)
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1))
with torch.no_grad():
__lowerCAmelCase: int = model(pixel_values=UpperCamelCase__ , pixel_mask=UpperCamelCase__)
__lowerCAmelCase: List[Any] = model(UpperCamelCase__)
comm_check_on_output(UpperCamelCase__)
__lowerCAmelCase: Tuple = model(
pixel_values=UpperCamelCase__ , pixel_mask=UpperCamelCase__ , mask_labels=UpperCamelCase__ , class_labels=UpperCamelCase__)
comm_check_on_output(UpperCamelCase__)
self.parent.assertTrue(result.loss is not None)
self.parent.assertEqual(result.loss.shape , torch.Size([1]))
@require_torch
class snake_case ( __snake_case, __snake_case, unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : int = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : int = (
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : str = False
SCREAMING_SNAKE_CASE_ : Dict = False
def lowercase_ ( self : Optional[int])-> Tuple:
'''simple docstring'''
__lowerCAmelCase: List[str] = MaskFormerModelTester(self)
__lowerCAmelCase: Union[str, Any] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__)
def lowercase_ ( self : Dict)-> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self : Tuple)-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCamelCase__ , **UpperCamelCase__ , output_hidden_states=UpperCamelCase__)
def lowercase_ ( self : Optional[int])-> List[Any]:
'''simple docstring'''
__lowerCAmelCase: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*UpperCamelCase__)
@unittest.skip(reason="MaskFormer does not use inputs_embeds")
def lowercase_ ( self : Optional[int])-> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
def lowercase_ ( self : Tuple)-> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason="MaskFormer is not a generative model")
def lowercase_ ( self : int)-> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason="MaskFormer does not use token embeddings")
def lowercase_ ( self : Dict)-> List[str]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
def lowercase_ ( self : List[str])-> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
def lowercase_ ( self : List[str])-> Optional[Any]:
'''simple docstring'''
pass
def lowercase_ ( self : str)-> List[str]:
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase: int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase: str = model_class(UpperCamelCase__)
__lowerCAmelCase: List[Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase: int = [*signature.parameters.keys()]
__lowerCAmelCase: List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase__)
@slow
def lowercase_ ( self : List[Any])-> Optional[Any]:
'''simple docstring'''
for model_name in ["facebook/maskformer-swin-small-coco"]:
__lowerCAmelCase: Dict = MaskFormerModel.from_pretrained(UpperCamelCase__)
self.assertIsNotNone(UpperCamelCase__)
def lowercase_ ( self : int)-> str:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = (self.model_tester.min_size,) * 2
__lowerCAmelCase: int = {
"pixel_values": torch.randn((2, 3, *size) , device=UpperCamelCase__),
"mask_labels": torch.randn((2, 1_0, *size) , device=UpperCamelCase__),
"class_labels": torch.zeros(2 , 1_0 , device=UpperCamelCase__).long(),
}
__lowerCAmelCase: Optional[Any] = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(UpperCamelCase__)
__lowerCAmelCase: Dict = model(**UpperCamelCase__)
self.assertTrue(outputs.loss is not None)
def lowercase_ ( self : Optional[int])-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCamelCase__ , **UpperCamelCase__ , output_hidden_states=UpperCamelCase__)
def lowercase_ ( self : Union[str, Any])-> Dict:
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase: Union[str, Any] = model_class(UpperCamelCase__).to(UpperCamelCase__)
__lowerCAmelCase: Tuple = model(**UpperCamelCase__ , output_attentions=UpperCamelCase__)
self.assertTrue(outputs.attentions is not None)
def lowercase_ ( self : int)-> Any:
'''simple docstring'''
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
__lowerCAmelCase: Dict = self.all_model_classes[1]
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: List[str] = self.model_tester.prepare_config_and_inputs()
__lowerCAmelCase: str = model_class(UpperCamelCase__)
model.to(UpperCamelCase__)
model.train()
__lowerCAmelCase: Optional[Any] = model(UpperCamelCase__ , mask_labels=UpperCamelCase__ , class_labels=UpperCamelCase__).loss
loss.backward()
def lowercase_ ( self : List[Any])-> int:
'''simple docstring'''
__lowerCAmelCase: List[str] = self.all_model_classes[1]
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: Dict = self.model_tester.prepare_config_and_inputs()
__lowerCAmelCase: Any = True
__lowerCAmelCase: int = True
__lowerCAmelCase: Tuple = model_class(UpperCamelCase__)
model.to(UpperCamelCase__)
model.train()
__lowerCAmelCase: int = model(UpperCamelCase__ , mask_labels=UpperCamelCase__ , class_labels=UpperCamelCase__)
__lowerCAmelCase: Optional[Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__lowerCAmelCase: List[Any] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
__lowerCAmelCase: List[Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__lowerCAmelCase: List[Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=UpperCamelCase__)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(pixel_decoder_hidden_states.grad)
self.assertIsNotNone(transformer_decoder_hidden_states.grad)
self.assertIsNotNone(attentions.grad)
__A = 1E-4
def a__ ( ) -> Any:
__lowerCAmelCase: Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class snake_case ( unittest.TestCase ):
@cached_property
def lowercase_ ( self : Any)-> Dict:
'''simple docstring'''
return (
MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
if is_vision_available()
else None
)
def lowercase_ ( self : Optional[Any])-> str:
'''simple docstring'''
__lowerCAmelCase: Any = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(UpperCamelCase__)
__lowerCAmelCase: Dict = self.default_image_processor
__lowerCAmelCase: Dict = prepare_img()
__lowerCAmelCase: Dict = image_processor(UpperCamelCase__ , return_tensors="pt").to(UpperCamelCase__)
__lowerCAmelCase: Union[str, Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0)
# check size
self.assertEqual(UpperCamelCase__ , (1, 3, 8_0_0, 1_0_8_8))
with torch.no_grad():
__lowerCAmelCase: Union[str, Any] = model(**UpperCamelCase__)
__lowerCAmelCase: Optional[Any] = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]).to(UpperCamelCase__)
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__))
__lowerCAmelCase: List[Any] = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]).to(UpperCamelCase__)
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__))
__lowerCAmelCase: Tuple = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]).to(UpperCamelCase__)
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__))
def lowercase_ ( self : int)-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: List[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
.to(UpperCamelCase__)
.eval()
)
__lowerCAmelCase: List[Any] = self.default_image_processor
__lowerCAmelCase: str = prepare_img()
__lowerCAmelCase: int = image_processor(UpperCamelCase__ , return_tensors="pt").to(UpperCamelCase__)
__lowerCAmelCase: Any = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0)
# check size
self.assertEqual(UpperCamelCase__ , (1, 3, 8_0_0, 1_0_8_8))
with torch.no_grad():
__lowerCAmelCase: Optional[int] = model(**UpperCamelCase__)
# masks_queries_logits
__lowerCAmelCase: int = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
__lowerCAmelCase: Optional[Any] = [
[-1.3737124, -1.7724937, -1.9364233],
[-1.5977281, -1.9867939, -2.1523695],
[-1.5795398, -1.9269832, -2.093942],
]
__lowerCAmelCase: Optional[int] = torch.tensor(UpperCamelCase__).to(UpperCamelCase__)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__))
# class_queries_logits
__lowerCAmelCase: Union[str, Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
__lowerCAmelCase: Optional[int] = torch.tensor(
[
[1.6512e00, -5.2572e00, -3.3519e00],
[3.6169e-02, -5.9025e00, -2.9313e00],
[1.0766e-04, -7.7630e00, -5.1263e00],
]).to(UpperCamelCase__)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__))
def lowercase_ ( self : List[Any])-> Any:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
.to(UpperCamelCase__)
.eval()
)
__lowerCAmelCase: Dict = self.default_image_processor
__lowerCAmelCase: List[str] = prepare_img()
__lowerCAmelCase: Tuple = image_processor(UpperCamelCase__ , return_tensors="pt").to(UpperCamelCase__)
__lowerCAmelCase: Tuple = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0)
# check size
self.assertEqual(UpperCamelCase__ , (1, 3, 8_0_0, 1_0_8_8))
with torch.no_grad():
__lowerCAmelCase: Dict = model(**UpperCamelCase__)
# masks_queries_logits
__lowerCAmelCase: List[str] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
__lowerCAmelCase: Optional[int] = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
__lowerCAmelCase: int = torch.tensor(UpperCamelCase__).to(UpperCamelCase__)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__))
# class_queries_logits
__lowerCAmelCase: List[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
__lowerCAmelCase: Any = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]).to(UpperCamelCase__)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__))
def lowercase_ ( self : Union[str, Any])-> Tuple:
'''simple docstring'''
__lowerCAmelCase: str = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
.to(UpperCamelCase__)
.eval()
)
__lowerCAmelCase: List[Any] = self.default_image_processor
__lowerCAmelCase: Optional[Any] = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3)), np.zeros((3, 8_0_0, 1_3_3_3))] , segmentation_maps=[np.zeros((3_8_4, 3_8_4)).astype(np.floataa), np.zeros((3_8_4, 3_8_4)).astype(np.floataa)] , return_tensors="pt" , )
__lowerCAmelCase: Union[str, Any] = inputs["pixel_values"].to(UpperCamelCase__)
__lowerCAmelCase: Optional[Any] = [el.to(UpperCamelCase__) for el in inputs["mask_labels"]]
__lowerCAmelCase: Optional[Any] = [el.to(UpperCamelCase__) for el in inputs["class_labels"]]
with torch.no_grad():
__lowerCAmelCase: int = model(**UpperCamelCase__)
self.assertTrue(outputs.loss is not None)
| 217 |
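# A minimal inference sketch for the model exercised by the tests above, using the
# same public checkpoint. The printed shapes match the test assertions:
# (batch, num_queries, num_labels + 1) class logits and (batch, num_queries, H/4, W/4)
# mask logits.
import torch
from PIL import Image
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.class_queries_logits.shape, outputs.masks_queries_logits.shape)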
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__A = logging.get_logger(__name__)
__A = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
__A = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
__A = {"facebook/blenderbot-3B": 128}
class snake_case ( __snake_case ):
SCREAMING_SNAKE_CASE_ : Dict = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : str = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Optional[int] = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE_ : List[Any] = BlenderbotTokenizer
def __init__( self : Any , UpperCamelCase__ : str=None , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Optional[Any]="replace" , UpperCamelCase__ : int="<s>" , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : List[Any]="</s>" , UpperCamelCase__ : Union[str, Any]="<s>" , UpperCamelCase__ : List[str]="<unk>" , UpperCamelCase__ : Dict="<pad>" , UpperCamelCase__ : List[Any]="<mask>" , UpperCamelCase__ : Any=False , UpperCamelCase__ : int=True , **UpperCamelCase__ : Union[str, Any] , )-> int:
'''simple docstring'''
super().__init__(
UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , errors=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ , **UpperCamelCase__ , )
__lowerCAmelCase: Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get("add_prefix_space" , UpperCamelCase__) != add_prefix_space:
__lowerCAmelCase: Dict = getattr(UpperCamelCase__ , pre_tok_state.pop("type"))
__lowerCAmelCase: int = add_prefix_space
__lowerCAmelCase: str = pre_tok_class(**UpperCamelCase__)
__lowerCAmelCase: List[Any] = add_prefix_space
__lowerCAmelCase: Union[str, Any] = "post_processor"
__lowerCAmelCase: Tuple = getattr(self.backend_tokenizer , UpperCamelCase__ , UpperCamelCase__)
if tokenizer_component_instance:
__lowerCAmelCase: List[Any] = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__lowerCAmelCase: str = tuple(state["sep"])
if "cls" in state:
__lowerCAmelCase: Any = tuple(state["cls"])
__lowerCAmelCase: int = False
if state.get("add_prefix_space" , UpperCamelCase__) != add_prefix_space:
__lowerCAmelCase: Dict = add_prefix_space
__lowerCAmelCase: Optional[Any] = True
if state.get("trim_offsets" , UpperCamelCase__) != trim_offsets:
__lowerCAmelCase: Dict = trim_offsets
__lowerCAmelCase: Tuple = True
if changes_to_apply:
__lowerCAmelCase: Dict = getattr(UpperCamelCase__ , state.pop("type"))
__lowerCAmelCase: str = component_class(**UpperCamelCase__)
setattr(self.backend_tokenizer , UpperCamelCase__ , UpperCamelCase__)
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowercase_ ( self : Optional[Any])-> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet.")
return None
return str(self._mask_token)
@mask_token.setter
def lowercase_ ( self : str , UpperCamelCase__ : Union[str, Any])-> Tuple:
'''simple docstring'''
__lowerCAmelCase: int = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__) if isinstance(UpperCamelCase__ , UpperCamelCase__) else value
__lowerCAmelCase: Any = value
def lowercase_ ( self : Union[str, Any] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Optional[int])-> BatchEncoding:
'''simple docstring'''
__lowerCAmelCase: List[str] = kwargs.get("is_split_into_words" , UpperCamelCase__)
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCamelCase__ , **UpperCamelCase__)
def lowercase_ ( self : Any , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : int)-> BatchEncoding:
'''simple docstring'''
__lowerCAmelCase: List[Any] = kwargs.get("is_split_into_words" , UpperCamelCase__)
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCamelCase__ , **UpperCamelCase__)
def lowercase_ ( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None)-> Tuple[str]:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__)
return tuple(UpperCamelCase__)
def lowercase_ ( self : str , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None)-> List[int]:
'''simple docstring'''
__lowerCAmelCase: Tuple = [self.sep_token_id]
__lowerCAmelCase: Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None)-> List[Any]:
'''simple docstring'''
return token_ids_a + [self.eos_token_id]
def lowercase_ ( self : Dict , UpperCamelCase__ : "Conversation")-> List[int]:
'''simple docstring'''
__lowerCAmelCase: str = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to space-prefix user turns, as is done within Blenderbot
inputs.append(" " + text)
else:
# Generated responses should contain them already.
inputs.append(UpperCamelCase__)
__lowerCAmelCase: Optional[int] = " ".join(UpperCamelCase__)
__lowerCAmelCase: Tuple = self.encode(UpperCamelCase__)
if len(UpperCamelCase__) > self.model_max_length:
__lowerCAmelCase: int = input_ids[-self.model_max_length :]
logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
return input_ids
| 217 | 1 |
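# A standalone sketch of the conversation flattening above: user turns are
# space-prefixed, all turns are joined, and the encoded history is left-trimmed so
# only the most recent model_max_length tokens survive. `encode` stands in for any
# tokenizer callable returning a list of ids; names are illustrative.
def build_conversation_ids(turns, encode, max_len):
    texts = [" " + text if is_user else text for is_user, text in turns]
    ids = encode(" ".join(texts))
    # Drop the oldest tokens first when the history is too long.
    return ids[-max_len:] if len(ids) > max_len else ids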
"""simple docstring"""
def is_power_of_two(number: int) -> bool:
    if number < 0:
        raise ValueError("""number must not be negative""")
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 358 |
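# is_power_of_two relies on n & (n - 1) clearing the lowest set bit: a positive power
# of two has exactly one bit set, so the AND is zero. Note that 0 also passes the
# check (0 & -1 == 0); add `number != 0 and ...` if zero should be rejected.
print(is_power_of_two(16))  # True
print(is_power_of_two(18))  # False
print(is_power_of_two(0))   # True (edge case)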
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> int:
snake_case_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case_ = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> List[Any]:
for i in range(config.num_hidden_layers ):
if base_model:
snake_case_ = """"""
else:
snake_case_ = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
snake_case_ = in_proj_weight[
: config.hidden_size, :
]
snake_case_ = in_proj_bias[: config.hidden_size]
snake_case_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case_ = in_proj_weight[
-config.hidden_size :, :
]
snake_case_ = in_proj_bias[-config.hidden_size :]
def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
snake_case_ = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
snake_case_ = dct.pop(_SCREAMING_SNAKE_CASE )
snake_case_ = val
def _a ( ) -> Dict:
snake_case_ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ) -> Any:
snake_case_ = ViTConfig()
# patch_size
if model_name[-1] == "8":
snake_case_ = 8
# set labels if required
if not base_model:
snake_case_ = 1_000
snake_case_ = """huggingface/label-files"""
snake_case_ = """imagenet-1k-id2label.json"""
snake_case_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) )
snake_case_ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
snake_case_ = 384
snake_case_ = 1_536
snake_case_ = 12
snake_case_ = 6
# load original model from torch hub
snake_case_ = torch.hub.load("""facebookresearch/dino:main""" , _SCREAMING_SNAKE_CASE )
original_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case_ = original_model.state_dict()
if base_model:
remove_classification_head_(_SCREAMING_SNAKE_CASE )
snake_case_ = create_rename_keys(_SCREAMING_SNAKE_CASE , base_model=_SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
read_in_q_k_v(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# load HuggingFace model
if base_model:
snake_case_ = ViTModel(_SCREAMING_SNAKE_CASE , add_pooling_layer=_SCREAMING_SNAKE_CASE ).eval()
else:
snake_case_ = ViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
model.load_state_dict(_SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by ViTImageProcessor
snake_case_ = ViTImageProcessor()
snake_case_ = image_processor(images=prepare_img() , return_tensors="""pt""" )
snake_case_ = encoding["""pixel_values"""]
snake_case_ = model(_SCREAMING_SNAKE_CASE )
if base_model:
snake_case_ = original_model(_SCREAMING_SNAKE_CASE )
assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
snake_case_ = original_model(_SCREAMING_SNAKE_CASE )
assert logits.shape == outputs.logits.shape
assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.logits , atol=1E-3 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 233 | 0 |
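# read_in_q_k_v above splits timm's fused qkv projection by slicing the weight rows
# into thirds. The slicing in isolation (hidden size chosen arbitrarily for the demo):
import torch

hidden = 4
in_proj_weight = torch.randn(3 * hidden, hidden)
q_w = in_proj_weight[:hidden, :]              # first third  -> query
k_w = in_proj_weight[hidden : 2 * hidden, :]  # middle third -> key
v_w = in_proj_weight[-hidden:, :]             # last third   -> value
assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)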
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
lowercase : List[Any] = logging.get_logger(__name__)
lowercase : Optional[int] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
lowercase : List[Any] = {
"""vocab_file""": {
"""allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"""
},
"""merges_file""": {
"""allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"""
},
}
lowercase : Any = {"""allegro/herbert-base-cased""": 5_1_4}
lowercase : str = {}
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
__A : Any = VOCAB_FILES_NAMES
__A : str = PRETRAINED_VOCAB_FILES_MAP
__A : Optional[int] = PRETRAINED_INIT_CONFIGURATION
__A : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : Tuple = HerbertTokenizer
def __init__( self , lowercase=None , lowercase=None , lowercase=None , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase="</s>" , **lowercase , ) -> List[Any]:
'''simple docstring'''
super().__init__(
lowercase , lowercase , tokenizer_file=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , sep_token=lowercase , **lowercase , )
def __lowercase ( self , lowercase , lowercase = None) -> List[int]:
'''simple docstring'''
a__ : str = [self.cls_token_id]
a__ : Tuple = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __lowercase ( self , lowercase , lowercase = None , lowercase = False) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase)
if token_ids_a is None:
return [1] + ([0] * len(lowercase)) + [1]
return [1] + ([0] * len(lowercase)) + [1] + ([0] * len(lowercase)) + [1]
def __lowercase ( self , lowercase , lowercase = None) -> List[int]:
'''simple docstring'''
a__ : Optional[int] = [self.sep_token_id]
a__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def __lowercase ( self , lowercase , lowercase = None) -> Tuple[str]:
'''simple docstring'''
a__ : Any = self._tokenizer.model.save(lowercase , name=lowercase)
return tuple(lowercase)
| 99 |
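# create_token_type_ids_from_sequences above marks the first segment
# ([CLS] tokens_a [SEP]) with 0s and an optional second segment (tokens_b [SEP]) with
# 1s. The same rule in plain Python, using sequence lengths instead of ids:
def token_type_ids(len_a, len_b=None):
    first = [0] * (1 + len_a + 1)  # [CLS] + tokens_a + [SEP]
    if len_b is None:
        return first
    return first + [1] * (len_b + 1)  # tokens_b + [SEP]

print(token_type_ids(3, 2))  # [0, 0, 0, 0, 0, 1, 1, 1]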
"""simple docstring"""
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    """simple docstring"""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by comma:\n""").strip()
    sequence = [int(item.strip()) for item in user_input.split(""",""")]
    target = int(input("""Enter the number to be found in the list:\n""").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f'{target} was {not_str}found in {sequence}')
| 224 | 0 |
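# The recursive version above copies the list at every level because of slicing, which
# adds O(n) extra work per call. An index-based iterative variant avoids the copies:
def binary_search_iterative(a_list, item):
    lo, hi = 0, len(a_list) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if a_list[mid] == item:
            return True
        if item < a_list[mid]:
            hi = mid - 1
        else:
            lo = mid + 1
    return False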
def solution(n: int = 4_000_000) -> int:
    '''simple docstring'''
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F"{solution() = }")
| 295 |
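# Only every third Fibonacci number is even, and the even ones satisfy
# E(n) = 4 * E(n-1) + E(n-2) (2, 8, 34, 144, ...), so the loop above can skip odd
# terms entirely:
def even_fib_sum(n=4_000_000):
    total, a, b = 0, 2, 8  # first two even Fibonacci numbers
    while a <= n:
        total += a
        a, b = b, 4 * b + a
    return total

print(even_fib_sum())  # 4613732, same as solution()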
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase_ ( _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = BertTokenizer
lowerCAmelCase_ = BertTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = filter_non_english
def lowerCAmelCase ( self ) -> Optional[int]:
super().setUp()
_snake_case = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = 'UNwant\u00E9d,running'
_snake_case = 'unwanted, running'
return input_text, output_text
def lowerCAmelCase ( self ) -> List[Any]:
_snake_case = self.tokenizer_class(self.vocab_file )
_snake_case = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(lowerCAmelCase_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [9, 6, 7, 12, 10, 11] )
def lowerCAmelCase ( self ) -> Tuple:
if not self.test_rust_tokenizer:
return
_snake_case = self.get_tokenizer()
_snake_case = self.get_rust_tokenizer()
_snake_case = 'UNwant\u00E9d,running'
_snake_case = tokenizer.tokenize(lowerCAmelCase_ )
_snake_case = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self.get_rust_tokenizer()
_snake_case = tokenizer.encode(lowerCAmelCase_ )
_snake_case = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# With lower casing
_snake_case = self.get_tokenizer(do_lower_case=lowerCAmelCase_ )
_snake_case = self.get_rust_tokenizer(do_lower_case=lowerCAmelCase_ )
_snake_case = 'UNwant\u00E9d,running'
_snake_case = tokenizer.tokenize(lowerCAmelCase_ )
_snake_case = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self.get_rust_tokenizer()
_snake_case = tokenizer.encode(lowerCAmelCase_ )
_snake_case = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCAmelCase ( self ) -> List[Any]:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def lowerCAmelCase ( self ) -> Any:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCAmelCase ( self ) -> Dict:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = BasicTokenizer()
_snake_case = 'a\n\'ll !!to?\'d of, can\'t.'
_snake_case = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
self.assertListEqual(tokenizer.tokenize(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
_snake_case = {}
for i, token in enumerate(lowerCAmelCase_ ):
_snake_case = i
_snake_case = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def lowerCAmelCase ( self ) -> Tuple:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def lowerCAmelCase ( self ) -> Dict:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def lowerCAmelCase ( self ) -> int:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = self.get_tokenizer()
_snake_case = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowerCAmelCase_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
self.assertListEqual(
[rust_tokenizer.tokenize(lowerCAmelCase_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
@slow
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = self.tokenizer_class.from_pretrained('bert-base-uncased' )
_snake_case = tokenizer.encode('sequence builders' , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer.encode('multi-sequence build' , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
_snake_case = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def lowerCAmelCase ( self ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
_snake_case = tokenizer_r.encode_plus(
lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , )
_snake_case = tokenizer_r.do_lower_case if hasattr(lowerCAmelCase_ , 'do_lower_case' ) else False
_snake_case = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ['的', '人', '有']
        text_with_chinese_char = ''.join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                kwargs['tokenize_chinese_chars'] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)
                kwargs['tokenize_chinese_chars'] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    F'''##{token}''' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 295 | 1 |
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name, arrival_time, burst_time, no_of_process):
    """Calculate the turn-around time of each process under HRRN scheduling."""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process:
    # 0 means not yet finished, 1 means finished.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()
    while no_of_process > finished_process_count:
        # Find the first unfinished process and fast-forward if the CPU is idle.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
    return turn_around_time


def calculate_waiting_time(process_name, turn_around_time, burst_time, no_of_process):
    """Calculate the waiting time of each process: turn-around minus burst."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
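# Hand-checkable example with two processes: P0 arrives at t=0 with burst 2 and
# runs first (turn-around 2); P1 arrives at t=1 and finishes at t=4
# (turn-around 3), having waited 1 time unit.
#
#   >>> calculate_turn_around_time(["P0", "P1"], [0, 1], [2, 2], 2)
#   [2, 3]
#   >>> calculate_waiting_time(["P0", "P1"], [2, 3], [2, 2], 2)
#   [0, 1]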
if __name__ == "__main__":
    no_of_process = 5
    process_name = ['A', 'B', 'C', 'D', 'E']
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
for i in range(0, no_of_process):
print(
f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
)
print(f"average waiting time : {mean(waiting_time):.5f}")
print(f"average turn around time : {mean(turn_around_time):.5f}")
| 146 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that builds a Dataset from a pyspark.sql.DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs,
        )

    def read(self):
        # Streaming mode returns an IterableDataset without materializing to disk.
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode, file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
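# Usage sketch (assumes an active SparkSession bound to `spark`; the public
# entry point `datasets.Dataset.from_spark` wraps this reader):
#
#   from datasets import Dataset
#   df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
#   ds = Dataset.from_spark(df)                          # materializes via Arrow
#   it = SparkDatasetReader(df, streaming=True).read()   # IterableDataset instead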
| 207 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
"""facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
"""facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
"""facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
"""facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
"""facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
"""facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, pre_norm=False, adapter_reduction_factor=2, adapter_layer_norm=False, adapter_reuse_layer_norm=True, ln_before_adapter=True, languages=("en_XX",), default_language=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
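# Minimal usage sketch (the sizes below are illustrative toy values, much
# smaller than the pretrained facebook/xmod-base defaults):
#
#   config = XmodConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2, num_attention_heads=2)
#   assert config.adapter_reduction_factor == 2   # X-MOD adapters enabled by default
#   assert config.default_language is None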
| 9 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class LMSDiscreteScheduler(metaclass=DummyObject):
    # Placeholder object that raises a helpful ImportError when torch/scipy are missing.
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
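# Behavior sketch (relies on the DummyObject metaclass imported above):
# instantiating the placeholder without torch and scipy installed raises an
# ImportError that names the missing backends, e.g.
#
#   scheduler = LMSDiscreteScheduler()
#   # ImportError: LMSDiscreteScheduler requires the torch and scipy libraries ...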
| 9 | 1 |
'''simple docstring'''
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f'{len(upper_files)} files contain uppercase characters:')
print("\n".join(upper_files) + "\n")
__UpperCAmelCase =[file for file in filepaths if " " in file]
if space_files:
print(f'{len(space_files)} files contain space characters:')
print("\n".join(space_files) + "\n")
__UpperCAmelCase =[file for file in filepaths if "-" in file]
if hyphen_files:
print(f'{len(hyphen_files)} files contain hyphen characters:')
print("\n".join(hyphen_files) + "\n")
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f'{len(nodir_files)} files are not in a directory:')
print("\n".join(nodir_files) + "\n")
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 67 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
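# Behavior sketch: because sys.modules[__name__] is swapped for a _LazyModule,
# the torch-backed modeling file is only imported on first attribute access:
#
#   from transformers.models.vit_msn import ViTMSNConfig  # cheap, config only
#   from transformers.models.vit_msn import ViTMSNModel   # imports modeling code lazily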
| 244 | 0 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Map each element of `vector` through the logistic function 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-vector))
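# Hand-checkable example: sigmoid(0) = 1 / (1 + e^0) = 0.5.
#
#   >>> sigmoid(np.array([0.0]))
#   array([0.5])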
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369 | def rank_of_matrix(matrix) -> int:
    """Find the rank of a matrix via Gaussian elimination (modifies `matrix` in place)."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]

            # Reduce the row pointer by one to stay on the same row
            row -= 1

    return rank
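# Worked example (hand-checkable): the second row is twice the first, so only
# one row is linearly independent and the computed rank is 1.
#
#   >>> rank_of_matrix([[1.0, 2.0], [2.0, 4.0]])
#   1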
if __name__ == "__main__":
import doctest
doctest.testmod()
| 165 | 0 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)


class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()
        self.register_modules(
            vqvae=vqvae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}")
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size
                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}.")

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive).")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """Zero out (in log space) the low-probability tail so kept probabilities sum to `truncation_rate`."""
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        # Map the mask back from sorted order to the original vocabulary order.
        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
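# Usage sketch (assumes the released "microsoft/vq-diffusion-ithq" checkpoint;
# the prompt and step count are illustrative):
#
#   from diffusers import VQDiffusionPipeline
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#   image = pipe("teddy bear playing in the pool", num_inference_steps=100).images[0]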
| 59 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a nested list of random floats with the given 2-D shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class Wav2Vec2FeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, return_attention_mask=True, do_normalize=True, ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np")
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np")
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np")
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))
    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
| 275 | 0 |
def apply_table(inp, table):
    """Apply a 1-indexed permutation/selection table to the bit string `inp`."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res
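# Worked example: tables are 1-indexed, so apply_table("1010", [2, 4, 3, 1])
# selects the characters at positions 2, 4, 3, 1 and returns "0011".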
def left_shift(data):
    """Circularly shift the bit string one position to the left."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up `data` in S-box `s`: outer bits select the row, inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
__UpperCAmelCase : List[str] = input("Enter 10 bit key: ")
__UpperCAmelCase : Dict = input("Enter 8 bit message: ")
__UpperCAmelCase : List[str] = [6, 3, 7, 4, 8, 5, 10, 9]
__UpperCAmelCase : Dict = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
__UpperCAmelCase : List[str] = [2, 4, 3, 1]
__UpperCAmelCase : Optional[Any] = [2, 6, 3, 1, 4, 8, 5, 7]
__UpperCAmelCase : Optional[int] = [4, 1, 3, 5, 7, 2, 8, 6]
__UpperCAmelCase : str = [4, 1, 2, 3, 2, 3, 4, 1]
__UpperCAmelCase : Optional[Any] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
__UpperCAmelCase : Dict = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
__UpperCAmelCase : str = apply_table(key, paa_table)
__UpperCAmelCase : Dict = temp[:5]
__UpperCAmelCase : Dict = temp[5:]
__UpperCAmelCase : List[Any] = left_shift(left)
__UpperCAmelCase : List[Any] = left_shift(right)
__UpperCAmelCase : str = apply_table(left + right, pa_table)
__UpperCAmelCase : int = left_shift(left)
__UpperCAmelCase : Union[str, Any] = left_shift(right)
__UpperCAmelCase : Tuple = left_shift(left)
__UpperCAmelCase : Dict = left_shift(right)
__UpperCAmelCase : Union[str, Any] = apply_table(left + right, pa_table)
# encryption
__UpperCAmelCase : Optional[int] = apply_table(message, IP)
__UpperCAmelCase : Any = function(expansion, sa, sa, keya, temp)
__UpperCAmelCase : Tuple = temp[4:] + temp[:4]
__UpperCAmelCase : Union[str, Any] = function(expansion, sa, sa, keya, temp)
__UpperCAmelCase : int = apply_table(temp, IP_inv)
print("Cipher text is:", CT)
# decryption
__UpperCAmelCase : Tuple = apply_table(CT, IP)
__UpperCAmelCase : Optional[int] = function(expansion, sa, sa, keya, temp)
__UpperCAmelCase : Dict = temp[4:] + temp[:4]
__UpperCAmelCase : Optional[int] = function(expansion, sa, sa, keya, temp)
__UpperCAmelCase : Optional[int] = apply_table(temp, IP_inv)
print("Plain text after decypting is:", PT)
| 293 |
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 293 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    """Return True when every tensor in the list has the same shape."""
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu", attention_head_dim=8, norm_num_groups=None, block_out_channels=[32, 32, 64, 64], time_cond_proj_dim=160, conv_in_kernel=1, conv_out_kernel=1, cross_attention_dim=32, down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ), in_channels=8, mid_block_type=None, only_cross_attention=False, out_channels=5, resnet_time_scale_shift="scale_shift", time_embedding_type="fourier", timestep_post_act="gelu", up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"), )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64], in_channels=3, out_channels=3, down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="quick_gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [
'''DDIMScheduler''',
'''DDPMScheduler''',
'''PNDMScheduler''',
'''HeunDiscreteScheduler''',
'''EulerAncestralDiscreteScheduler''',
'''KDPM2DiscreteScheduler''',
'''KDPM2AncestralDiscreteScheduler''',
'''DPMSolverSDEScheduler''',
]
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = self.pipeline_class(**_UpperCAmelCase )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(_UpperCAmelCase )
__SCREAMING_SNAKE_CASE = 2
__SCREAMING_SNAKE_CASE = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
__SCREAMING_SNAKE_CASE = getattr(_UpperCAmelCase , scheduler_enum.name )
__SCREAMING_SNAKE_CASE = scheduler_cls.from_config(pipe.scheduler.config )
__SCREAMING_SNAKE_CASE = pipe(**_UpperCAmelCase )[0]
outputs.append(_UpperCAmelCase )
assert check_same_shape(_UpperCAmelCase )
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16)
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt, image=low_res_latents, num_inference_steps=20, guidance_scale=0, generator=generator, output_type="np", ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy")
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16)
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png")

        image = upscaler(
            prompt=prompt, image=image, num_inference_steps=20, guidance_scale=0, generator=generator, output_type="np", ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy")
        assert np.abs((expected_image - image).max()) < 5e-2
| 267 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    print(F"""Converting {name}...""")

    with torch.no_grad():
        if hidden_sizes == 1_28:
            if name[-1] == "S":
                from_model = timm.create_model('''levit_128s''', pretrained=True)
            else:
                from_model = timm.create_model('''levit_128''', pretrained=True)
        if hidden_sizes == 1_92:
            from_model = timm.create_model('''levit_192''', pretrained=True)
        if hidden_sizes == 2_56:
            from_model = timm.create_model('''levit_256''', pretrained=True)
        if hidden_sizes == 3_84:
            from_model = timm.create_model('''levit_384''', pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # Copy weights positionally from the timm checkpoint into the HF model.
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 2_24, 2_24))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(F"""Pushed {checkpoint_name}""")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = '''imagenet-1k-id2label.json'''
    num_labels = 10_00
    expected_shape = (1, num_labels)

    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_hidden_sizes = {
'''levit-128S''': 1_28,
'''levit-128''': 1_28,
'''levit-192''': 1_92,
'''levit-256''': 2_56,
'''levit-384''': 3_84,
}
    names_to_config = {
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''levit-dump-folder/''',
type=Path,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
A = parser.parse_args()
A = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 160 | 0 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning, )
        return self.image_processor
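# Usage sketch (requires hub access; "openai/clip-vit-base-patch32" is a real
# CLIP checkpoint, shown here only as an example):
#
#   from PIL import Image
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a cat"], images=Image.new("RGB", (224, 224)), return_tensors="pt")
#   # inputs now holds input_ids, attention_mask and pixel_values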
| 107 |
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
__lowerCAmelCase = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
__lowerCAmelCase = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__lowerCAmelCase = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score `item` by counting the positions where it matches the target."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
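# Hand-checkable example: "abc" matches the target "abd" at positions 0 and 1,
# so the score is 2.
#
#   >>> evaluate("abc", "abd")
#   ('abc', 2.0)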
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two parent strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str], ) -> list[str]:
    """Select the second parent and generate the new population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"""{N_POPULATION} must be bigger than {N_SELECTED}"""
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append(''.join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"""\nGeneration: {generation}"""
                f"""\nTotal Population:{total_population}"""
                f"""\nBest score: {population_score[0][1]}"""
                f"""\nBest string: {population_score[0][0]}""")

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        '''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
    )
    genes_list = list(
        ''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
        '''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
    )
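# For reference, a minimal sketch of one helper the loop above relies on.
# `evaluate`, `crossover`, and `mutate` are assumed to be defined earlier in
# this module; the body below is illustrative, not the module's version.
#
#   def evaluate(item: str, main_target: str) -> tuple[str, float]:
#       # Fitness = number of position-wise character matches with the target.
#       score = len([g for position, g in enumerate(item) if g == main_target[position]])
#       return (item, float(score))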
| 107 | 1 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = '''http://www.mocksite.com/file1.txt'''
CONTENT = '''"text": ["foo", "foo"]'''
HASH = '''6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'''


class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def lowerCamelCase__ ( a__ : Optional[Any] , a__ : Any , a__ : Dict ) -> Optional[int]:
import requests
monkeypatch.setattr(a__ , """request""" , a__ )
UpperCamelCase_ = URL
if issubclass(a__ , a__ ):
UpperCamelCase_ = url
elif issubclass(a__ , a__ ):
UpperCamelCase_ = [url]
elif issubclass(a__ , a__ ):
UpperCamelCase_ = {"""train""": url}
UpperCamelCase_ = """dummy"""
UpperCamelCase_ = """downloads"""
UpperCamelCase_ = tmp_path
UpperCamelCase_ = DownloadConfig(
cache_dir=os.path.join(a__ , a__ ) , use_etag=a__ , )
UpperCamelCase_ = DownloadManager(dataset_name=a__ , download_config=a__ )
UpperCamelCase_ = dl_manager.download(a__ )
UpperCamelCase_ = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(a__ , a__ ):
UpperCamelCase_ = [downloaded_paths]
UpperCamelCase_ = [urls]
elif isinstance(a__ , a__ ):
assert "train" in downloaded_paths.keys()
UpperCamelCase_ = downloaded_paths.values()
UpperCamelCase_ = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(a__ , a__ ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
UpperCamelCase_ = Path(a__ )
UpperCamelCase_ = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
UpperCamelCase_ = downloaded_path.read_text()
assert content == CONTENT
UpperCamelCase_ = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
UpperCamelCase_ = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def lowerCamelCase__ ( a__ : List[str] , a__ : int , a__ : Union[str, Any] ) -> str:
UpperCamelCase_ = str(a__ )
if issubclass(a__ , a__ ):
UpperCamelCase_ = filename
elif issubclass(a__ , a__ ):
UpperCamelCase_ = [filename]
elif issubclass(a__ , a__ ):
UpperCamelCase_ = {"""train""": filename}
UpperCamelCase_ = """dummy"""
UpperCamelCase_ = xz_file.parent
UpperCamelCase_ = """extracted"""
UpperCamelCase_ = DownloadConfig(
cache_dir=a__ , use_etag=a__ , )
UpperCamelCase_ = DownloadManager(dataset_name=a__ , download_config=a__ )
UpperCamelCase_ = dl_manager.extract(a__ )
UpperCamelCase_ = paths
for extracted_paths in [extracted_paths]:
if isinstance(a__ , a__ ):
UpperCamelCase_ = [extracted_paths]
UpperCamelCase_ = [paths]
elif isinstance(a__ , a__ ):
assert "train" in extracted_paths.keys()
UpperCamelCase_ = extracted_paths.values()
UpperCamelCase_ = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(a__ , a__ ):
assert extracted_path == dl_manager.extracted_paths[input_path]
UpperCamelCase_ = Path(a__ )
UpperCamelCase_ = extracted_path.parts
assert parts[-1] == hash_url_to_filename(a__ , etag=a__ )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
UpperCamelCase_ = extracted_path.read_text()
UpperCamelCase_ = text_file.read_text()
assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 122 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '''\
'''
_DESCRIPTION = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
_KWARGS_DESCRIPTION = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"
        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length
        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)
        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")
        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]
            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )
            labels = encoded_batch
            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 122 | 1 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        raw_speech = floats_list((3, 1_000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names[2:], feature_extractor.model_input_names, msg="`processor` and `feature_extractor` model input names do not match", )
| 70 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def main() -> None:
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")
    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)
        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
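# Example invocation (illustrative; the script file name and paths below are
# assumptions, not taken from this repository):
#   python binarized_data.py --file_path data/dump.txt \
#       --tokenizer_type bert --tokenizer_name bert-base-uncased \
#       --dump_file data/binarized_text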
| 70 | 1 |
def selection_sort(collection: list) -> list:
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(selection_sort(unsorted))
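    # Illustrative sanity checks (not part of the original module; expected
    # values computed by hand). Selection sort is in-place and O(n^2):
    assert selection_sort([3, 1, 2]) == [1, 2, 3]
    assert selection_sort([-5, 5, 0]) == [-5, 0, 5]
    assert selection_sort([]) == []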
| 18 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}


class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape


class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ):
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
| 18 | 1 |
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
    if inductance <= 0:
        raise ValueError('''Inductance cannot be 0 or negative''')
    elif capacitance <= 0:
        raise ValueError('''Capacitance cannot be 0 or negative''')
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
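    # Illustrative usage (values are assumptions, not from the original
    # module): a 1 mH inductor with a 1 uF capacitor resonates near
    # 1 / (2 * pi * sqrt(1e-3 * 1e-6)) ~ 5032.9 Hz.
    print(resonant_frequency(inductance=1e-3, capacitance=1e-6))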
| 131 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '''▁'''

VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''google/reformer-crime-and-punishment''': (
            '''https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'''
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''google/reformer-crime-and-punishment''': 52_4288,
}


class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 131 | 1 |
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
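
# Alternative loop check, included as an illustrative sketch (not part of
# the original module): Floyd's tortoise-and-hare detects a cycle in O(1)
# extra space, whereas the `visited` list in `__iter__` above costs O(n)
# memory and O(n^2) time.
def has_loop_floyd(head: Node) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node  # advance one step
        fast = fast.next_node.next_node  # advance two steps
        if slow is fast:
            return True
    return False
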
if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
| 212 |
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))  # 2 * pi * 10 * (90 / 360) = 5 * pi ~ 15.7079...
| 233 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/xglm-564M': 20_48,
}


class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [F'<madeupword{i}>' for i in range(self.num_madeup_words)]
        kwargs["additional_special_tokens"] = kwargs.get('''additional_special_tokens''', [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        sp_size = len(self.sp_model)
        madeup_words = {F'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = ''''''.join(tokens).replace(SPIECE_UNDERLINE, ''' ''').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 177 |
import base64


def base32_encode(string: str) -> bytes:
    return base64.b32encode(string.encode('''utf-8'''))


def base32_decode(encoded_bytes: bytes) -> str:
    return base64.b32decode(encoded_bytes).decode('''utf-8''')


if __name__ == "__main__":
    test = 'Hello World!'
    encoded = base32_encode(test)
    print(encoded)
    decoded = base32_decode(encoded)
    print(decoded)
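    # Expected output for the example above (illustrative; computed by hand
    # with the standard RFC 4648 base32 alphabet):
    #   b'JBSWY3DPEBLW64TMMQQQ===='
    #   Hello World!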
| 177 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    '''configuration_vision_text_dual_encoder''': ['''VisionTextDualEncoderConfig'''],
    '''processing_vision_text_dual_encoder''': ['''VisionTextDualEncoderProcessor'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ['''VisionTextDualEncoderModel''']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ['''FlaxVisionTextDualEncoderModel''']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ['''TFVisionTextDualEncoderModel''']

if TYPE_CHECKING:
    from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
    from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 295 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=1_3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=9_9,
        hidden_size=3_2,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=3_7,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_2,
        type_vocab_size=1_6,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, )

    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        ((config), (input_ids), (input_mask), (sequence_labels), (token_labels), (choice_labels)) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': DistilBertModel,
            'fill-mask': DistilBertForMaskedLM,
            'question-answering': DistilBertForQuestionAnswering,
            'text-classification': DistilBertForSequenceClassification,
            'token-classification': DistilBertForTokenClassification,
            'zero-shot': DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=3_7)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict['input_ids'].to('cpu'), inputs_dict['attention_mask'].to('cpu')))
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, 'traced_model.pt'))
                loaded = torch.jit.load(os.path.join(tmp, 'traced_model.pt'), map_location=torch_device)
                loaded(inputs_dict['input_ids'].to(torch_device), inputs_dict['attention_mask'].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained('distilbert-base-uncased')
        input_ids = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 1_1, 7_6_8))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4))
| 295 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 20_00,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)
        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)
            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample
            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample)
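# Illustrative usage sketch (an assumption, not part of this file; it relies
# on a pretrained score-SDE VE checkpoint such as "google/ncsnpp-church-256"):
#
#   from diffusers import ScoreSdeVePipeline
#
#   sde_ve = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#   image = sde_ve(num_inference_steps=2000).images[0]
#   image.save("sde_ve_generated_image.png")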
| 370 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}


class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=3_05_22,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=7_68,
        pronunciation_vocab_size=9_10,
        shape_embed_dim=5_12,
        shape_vocab_size=2_48_58,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 342 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : Tuple =logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] ={
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
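

# A minimal usage sketch (added for illustration, not part of the original
# module): build a small config and inspect the ONNX input axes. Everything
# referenced here is defined above and nothing is downloaded.
if __name__ == "__main__":
    config = XmodConfig(num_hidden_layers=2, languages=("en_XX", "de_DE"))
    print(config.model_type, config.languages)
    onnx_config = XmodOnnxConfig(config, task="default")
    print(dict(onnx_config.inputs))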
| 9 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
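
# A hedged usage sketch (not part of the test suite): outside the tests the
# processor is typically driven like this; running it downloads the
# "ylacombe/bark-small" checkpoint from the Hub.
#
#   processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
#   inputs = processor("This is a test string", voice_preset="en_speaker_1")
#   # -> "input_ids", "attention_mask" and a "history_prompt" dict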
| 9 | 1 |
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return


@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 355 |
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]

    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
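

# For orientation (illustrative key names, inferred from the lookups above):
# a flattened T5X checkpoint exposes parameters under keys such as
#
#   "encoder/layers_0/attention/query/kernel"
#   "encoder/layers_0/pre_attention_layer_norm/scale"
#   "encoder/layers_0/mlp/wi/kernel"
#
# so each helper only needs the prefix ("encoder"/"decoder"), the block index
# and the layer name to address a parameter.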
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from the T5X checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
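
    # A typical invocation (illustrative paths; the flag names are the ones
    # registered with argparse above):
    #
    #   python convert_t5x_checkpoint_to_pytorch.py \
    #       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
    #       --config_file /path/to/config.json \
    #       --pytorch_dump_path /path/to/output \
    #       --is_encoder_only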
| 28 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
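
# The module follows the standard transformers lazy-import layout: importable
# names are declared in `_import_structure`, optional backends are probed with
# try/except below, and `_LazyModule` is swapped into `sys.modules` at the
# bottom so heavy dependencies are only imported on first attribute access.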
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 262 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a config from a pre-trained backbone model configuration."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
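

# A minimal usage sketch (added for illustration, not part of the original
# file): build a default config and read back attributes that are routed
# through `attribute_map`. With `use_timm_backbone=True` (the default) no HF
# backbone config is attached, so nothing is downloaded here.
if __name__ == "__main__":
    config = DetrConfig(num_queries=50)
    print(config.hidden_size, config.num_attention_heads)  # d_model / encoder_attention_heads
    print(config.to_dict()["model_type"])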
| 165 | 0 |
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pads variable-length sequences to `sequence_length` and returns them as nested lists.

    A tuple `padding_value` signals 2-column entries (e.g. entity spans), which
    are padded into a (batch, sequence_length, 2) array instead.
    """
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        tensor = tensor[:sequence_length]
        if padding_side == "right":
            out_tensor[i, : len(tensor)] = tensor
        else:
            out_tensor[i, -len(tensor) :] = tensor

    return out_tensor.tolist()
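

# Example (illustrative): right-padding two label sequences to length 4 with
# -1 as the fill value.
#
#   padding_tensor([[1, 2], [3]], -1, "right", 4)
#   -> [[1, 2, -1, -1], [3, -1, -1, -1]]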
def is_punctuation(char):
    """Checks whether `char` is a punctuation character."""
    cp = ord(char)
    # We treat all non-letter/number ASCII as punctuation, since characters
    # such as "^", "$" and "`" are not in the Unicode "P" categories.
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    """Data collator that dynamically pads the inputs received, as well as the labels."""

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
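

# A hedged usage sketch (not part of the original script): wiring the collator
# into a PyTorch DataLoader. `tokenizer` and `dataset` are assumed to come from
# the surrounding training script; the tokenizer must be a LUKE tokenizer whose
# padded output includes "entity_ids".
#
#   from torch.utils.data import DataLoader
#
#   collator = DataCollatorForLukeTokenClassification(tokenizer=tokenizer, label_pad_token_id=-100)
#   loader = DataLoader(dataset, batch_size=8, collate_fn=collator)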
| 132 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    """Output of the ControlNet: one residual per down block plus the mid-block residual."""

    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in,
                kernel_size=(3, 3),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out,
                kernel_size=(3, 3),
                strides=(2, 2),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
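

# A minimal initialisation sketch (added for illustration): build the module
# and materialise its parameters with `init_weights`. The small
# `block_out_channels` below is an assumption to keep the example cheap; it
# still has to match the four `down_block_types` declared above.
#
#   import jax
#   controlnet = FlaxControlNetModel(sample_size=32, block_out_channels=(32, 64, 128, 128))
#   params = controlnet.init_weights(jax.random.PRNGKey(0))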
| 132 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def __A () ->Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ :List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self ):
'''simple docstring'''
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(__UpperCAmelCase )
lowerCAmelCase__ :str = self.default_image_processor
lowerCAmelCase__ :Optional[Any] = prepare_img()
lowerCAmelCase__ :Dict = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
lowerCAmelCase__ :str = model(**__UpperCAmelCase )
# verify the logits
lowerCAmelCase__ :Optional[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
lowerCAmelCase__ :Optional[int] = model.to(__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
lowerCAmelCase__ :Any = prepare_img()
lowerCAmelCase__ :Any = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
lowerCAmelCase__ :List[str] = model(**__UpperCAmelCase )
lowerCAmelCase__ :Dict = outputs.logits
# verify the logits
lowerCAmelCase__ :List[Any] = torch.Size((1, 2_1, 3_2, 3_2) )
self.assertEqual(logits.shape , __UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = torch.tensor(
[
[[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]],
[[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]],
[[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]],
] , device=__UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
lowerCAmelCase__ :List[Any] = model.to(__UpperCAmelCase )
lowerCAmelCase__ :str = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
lowerCAmelCase__ :str = prepare_img()
lowerCAmelCase__ :int = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
lowerCAmelCase__ :int = model(**__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = outputs.logits.detach().cpu()
lowerCAmelCase__ :Tuple = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase , target_sizes=[(5_0, 6_0)] )
lowerCAmelCase__ :Optional[int] = torch.Size((5_0, 6_0) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase )
lowerCAmelCase__ :str = torch.Size((3_2, 3_2) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
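# Hedged, standalone sketch of the classification path the tests above exercise;
# the checkpoint name and fixture path are taken verbatim from the tests, and the
# snippet assumes network access to download the weights.
import torch
from PIL import Image
from transformers import MobileViTForImageClassification, MobileViTImageProcessor

processor = MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small')
model = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small')
image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
inputs = processor(images=image, return_tensors='pt')
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000), as asserted in the test above
print(logits.argmax(-1).item())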
| 293 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"""REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ReformerAttention""",
"""ReformerForMaskedLM""",
"""ReformerForQuestionAnswering""",
"""ReformerForSequenceClassification""",
"""ReformerLayer""",
"""ReformerModel""",
"""ReformerModelWithLMHead""",
"""ReformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 293 | 1 |
"""simple docstring"""
def wave(txt: str) -> list:
    """simple docstring"""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__('doctest').testmod()
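# Quick usage check for wave() above: each alphabetic position yields one variant
# with that single character uppercased; non-letter positions are skipped.
assert wave("ab") == ["Ab", "aB"]
assert wave("a1b") == ["A1b", "a1B"]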
| 366 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/mbart-large-en-ro': 1024,
'facebook/mbart-large-cc25': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self, src_texts, src_lang="en_XX", tgt_texts=None, tgt_lang="ro_RO", **kwargs
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
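# Hedged usage sketch for MBartTokenizerFast as restored above; the checkpoint
# name comes from PRETRAINED_VOCAB_FILES_MAP earlier in this file, and the call
# assumes network access to fetch the tokenizer files.
from transformers import MBartTokenizerFast

tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX")
ids = tok("UN Chief says there is no military solution in Syria")["input_ids"]
# set_src_lang_special_tokens() appends the language code after </s>:
assert tok.convert_ids_to_tokens(ids)[-2:] == ["</s>", "en_XX"]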
| 203 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCAmelCase : str = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class OpenAIGPTConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
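# Minimal sketch exercising the attribute_map above: the canonical Hugging Face
# names resolve to the GPT-style attributes set in __init__ (assumes the
# transformers library is installed).
from transformers import OpenAIGPTConfig

cfg = OpenAIGPTConfig(n_layer=6, n_embd=512)
assert cfg.num_hidden_layers == 6   # remapped to n_layer
assert cfg.hidden_size == 512       # remapped to n_embd
assert cfg.max_position_embeddings == cfg.n_positions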
| 107 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """simple docstring"""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x,
            to_plot_y,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
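# Small non-plotting check of BezierCurve: a degree-1 curve is a straight line,
# so the point at t = 0.5 is the midpoint of the two control points.
import math

line = BezierCurve([(0.0, 0.0), (2.0, 4.0)])
assert line.bezier_curve_function(0.5) == (1.0, 2.0)
assert math.isclose(sum(line.basis_function(0.3)), 1.0)  # Bernstein basis sums to 1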
| 107 | 1 |
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class UpperCamelCase_ ( a_ ):
def __init__( self , snake_case__=None , snake_case__=None , *snake_case__ , **snake_case__ ) -> Optional[Any]:
"""simple docstring"""
super().__init__(*snake_case__ , **snake_case__ )
if config is None:
assert isinstance(self.model , snake_case__ ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f''' {self.model.__class__}'''
)
UpperCAmelCase = self.model.config
else:
UpperCAmelCase = config
UpperCAmelCase = data_args
UpperCAmelCase = self.config.tgt_vocab_size if isinstance(self.config , snake_case__ ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
""" padding..""" )
if self.args.label_smoothing == 0:
UpperCAmelCase = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
UpperCAmelCase = label_smoothed_nll_loss
def UpperCamelCase_ ( self , snake_case__ ) -> str:
"""simple docstring"""
if self.optimizer is None:
UpperCAmelCase = ["""bias""", """LayerNorm.weight"""]
UpperCAmelCase = [
{
"""params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"""weight_decay""": self.args.weight_decay,
},
{
"""params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
UpperCAmelCase = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
UpperCAmelCase = Adafactor
UpperCAmelCase = {"""scale_parameter""": False, """relative_step""": False}
else:
UpperCAmelCase = AdamW
                UpperCAmelCase = {
                    """betas""": (self.args.adam_beta1, self.args.adam_beta2),
                    """eps""": self.args.adam_epsilon,
                }
UpperCAmelCase = self.args.learning_rate
if self.sharded_ddp:
UpperCAmelCase = OSS(
params=snake_case__ , optim=snake_case__ , **snake_case__ , )
else:
UpperCAmelCase = optimizer_cls(snake_case__ , **snake_case__ )
if self.lr_scheduler is None:
UpperCAmelCase = self._get_lr_scheduler(snake_case__ )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
def UpperCamelCase_ ( self , snake_case__ ) -> str:
"""simple docstring"""
UpperCAmelCase = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
UpperCAmelCase = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
UpperCAmelCase = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
UpperCAmelCase = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=snake_case__ )
return scheduler
def UpperCamelCase_ ( self ) -> Optional[torch.utils.data.Sampler]:
"""simple docstring"""
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ) -> Dict:
"""simple docstring"""
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
UpperCAmelCase = model(**snake_case__ , use_cache=snake_case__ )[0]
UpperCAmelCase = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
UpperCAmelCase , UpperCAmelCase = model(**snake_case__ , labels=snake_case__ , use_cache=snake_case__ )[:2]
else:
# compute label smoothed loss
UpperCAmelCase = model(**snake_case__ , use_cache=snake_case__ )[0]
UpperCAmelCase = torch.nn.functional.log_softmax(snake_case__ , dim=-1 )
UpperCAmelCase , UpperCAmelCase = self.loss_fn(snake_case__ , snake_case__ , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def UpperCamelCase_ ( self , snake_case__ , snake_case__ ) -> Any:
"""simple docstring"""
UpperCAmelCase = inputs.pop("""labels""" )
UpperCAmelCase , UpperCAmelCase = self._compute_loss(snake_case__ , snake_case__ , snake_case__ )
return loss
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
"""simple docstring"""
UpperCAmelCase = self._prepare_inputs(snake_case__ )
UpperCAmelCase = {
"""max_length""": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"""num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
UpperCAmelCase = self.model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **snake_case__ , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
UpperCAmelCase = self._pad_tensors_to_max_len(snake_case__ , gen_kwargs["""max_length"""] )
UpperCAmelCase = inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
UpperCAmelCase , UpperCAmelCase = self._compute_loss(snake_case__ , snake_case__ , snake_case__ )
UpperCAmelCase = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
UpperCAmelCase = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
UpperCAmelCase = self._pad_tensors_to_max_len(snake_case__ , gen_kwargs["""max_length"""] )
return (loss, logits, labels)
def UpperCamelCase_ ( self , snake_case__ , snake_case__ ) -> str:
"""simple docstring"""
UpperCAmelCase = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"""Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
f''' padded to `max_length`={max_length}''' )
UpperCAmelCase = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
UpperCAmelCase = tensor
return padded_tensor
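# Hedged sketch of how the arg_to_scheduler mapping above is typically consumed;
# the toy optimizer and step counts below are illustrative assumptions, not
# values taken from this file.
import torch

params = [torch.nn.Parameter(torch.zeros(2, 2))]
optimizer = torch.optim.AdamW(params, lr=3e-4)
scheduler = arg_to_scheduler["linear"](optimizer, num_warmup_steps=10, num_training_steps=100)
for _ in range(5):
    optimizer.step()
    scheduler.step()
print(scheduler.get_last_lr())  # learning rate ramps up linearly during warmup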
| 248 |
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    '''simple docstring'''
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    '''simple docstring'''
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
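# Optional sanity check that runs without downloading the California data; the
# synthetic coefficients below are arbitrary illustrative values.
import numpy as np

rng = np.random.default_rng(0)
features = rng.normal(size=(200, 3))
target = features @ np.array([1.5, -2.0, 0.5]) + rng.normal(scale=0.1, size=200)
preds = xgboost(features[:150], target[:150], features[150:])
assert preds.shape == (50, 1)  # xgboost() reshapes predictions to a column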
| 248 | 1 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = '''
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
'''
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 70 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    """simple docstring"""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []


def solution() -> int:
    """simple docstring"""
    return compute_nums(1)[0]
if __name__ == "__main__":
print(F"""{solution() = }""")
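# Hedged cross-check: 5777 is the documented answer to Project Euler 46 (the
# smallest odd composite that is not a prime plus twice a square) and 5993 is
# the next one. This relies on an external fact, not on this file alone.
assert solution() == 5777
assert compute_nums(2) == [5777, 5993]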
| 70 | 1 |
"""simple docstring"""
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    '''simple docstring'''

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    '''simple docstring'''

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
        for elem in grid:
            print(elem)
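# Extra hedged demo: a second search toward an intermediate goal on the same
# module-level grid. Note the __main__ block above mutates `grid` (marks the
# found path with 2s), so run this before that demo, or reset the grid first,
# to get a meaningful route; greedy best-first returns a valid (not
# necessarily shortest) path.
demo = GreedyBestFirst((0, 0), (2, 2))
print(demo.search())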
| 161 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"""REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ReformerAttention""",
"""ReformerForMaskedLM""",
"""ReformerForQuestionAnswering""",
"""ReformerForSequenceClassification""",
"""ReformerLayer""",
"""ReformerModel""",
"""ReformerModelWithLMHead""",
"""ReformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 161 | 1 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _a :
def __init__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : int=13 , _SCREAMING_SNAKE_CASE : str=3 , _SCREAMING_SNAKE_CASE : str=True , _SCREAMING_SNAKE_CASE : Dict=True , _SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , _SCREAMING_SNAKE_CASE : Optional[int]=0.1 , _SCREAMING_SNAKE_CASE : Dict=224 , _SCREAMING_SNAKE_CASE : Optional[Any]=1000 , _SCREAMING_SNAKE_CASE : Union[str, Any]=[3, 3, 6, 4] , _SCREAMING_SNAKE_CASE : str=[48, 56, 112, 220] , )-> Dict:
lowerCAmelCase__ : str = parent
lowerCAmelCase__ : Dict = batch_size
lowerCAmelCase__ : Union[str, Any] = num_channels
lowerCAmelCase__ : List[str] = is_training
lowerCAmelCase__ : int = use_labels
lowerCAmelCase__ : List[Any] = hidden_dropout_prob
lowerCAmelCase__ : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase__ : Tuple = num_labels
lowerCAmelCase__ : Union[str, Any] = image_size
lowerCAmelCase__ : Tuple = layer_depths
lowerCAmelCase__ : Union[str, Any] = embed_dims
def UpperCAmelCase__( self : str )-> List[Any]:
lowerCAmelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ : Dict = None
if self.use_labels:
lowerCAmelCase__ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase__ : Dict = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__( self : Tuple )-> int:
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_SCREAMING_SNAKE_CASE , layer_scale_init_value=1E-5 , )
def UpperCAmelCase__( self : str , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[int] )-> Dict:
lowerCAmelCase__ : Dict = SwiftFormerModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase__ : List[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def UpperCAmelCase__( self : Optional[int] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[int] )-> str:
lowerCAmelCase__ : Dict = self.num_labels
lowerCAmelCase__ : Any = SwiftFormerForImageClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase__ : Union[str, Any] = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
lowerCAmelCase__ : Tuple = SwiftFormerForImageClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ : Any = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__( self : Tuple )-> str:
((lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__)) : Dict = self.prepare_config_and_inputs()
lowerCAmelCase__ : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _a ( _lowercase , _lowercase , unittest.TestCase):
_a : Dict = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
_a : List[str] = (
{'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
_a : Union[str, Any] = False
_a : List[str] = False
_a : Optional[int] = False
_a : List[str] = False
_a : Dict = False
def UpperCAmelCase__( self : Dict )-> int:
lowerCAmelCase__ : List[str] = SwiftFormerModelTester(self )
lowerCAmelCase__ : Dict = ConfigTester(
self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def UpperCAmelCase__( self : Union[str, Any] )-> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
def UpperCAmelCase__( self : Optional[Any] )-> str:
pass
def UpperCAmelCase__( self : Any )-> int:
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Dict = model_class(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def UpperCAmelCase__( self : str )-> str:
lowerCAmelCase__ , lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : str = model_class(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ : List[Any] = [*signature.parameters.keys()]
lowerCAmelCase__ : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : List[str] )-> str:
lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : List[Any] )-> Optional[Any]:
lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
@slow
def UpperCAmelCase__( self : Optional[Any] )-> Dict:
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : List[Any] = SwiftFormerModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@unittest.skip(reason='''SwiftFormer does not output attentions''' )
def UpperCAmelCase__( self : List[str] )-> Any:
pass
def UpperCAmelCase__( self : List[str] )-> str:
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO
            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
def UpperCAmelCase__( self : Dict )-> Dict:
def _config_zero_init(_SCREAMING_SNAKE_CASE : Union[str, Any] ):
lowerCAmelCase__ : Union[str, Any] = copy.deepcopy(_SCREAMING_SNAKE_CASE )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 1E-10 )
if isinstance(getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ : Optional[int] = _config_zero_init(getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return configs_no_init
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : List[Any] = _config_zero_init(_SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
lowerCAmelCase__ : Dict = model_class(config=_SCREAMING_SNAKE_CASE )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCAmelCase__( self : str )-> Any:
pass
def prepare_img():
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class _a ( unittest.TestCase):
@cached_property
def UpperCAmelCase__( self : str )-> Tuple:
return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None
@slow
def UpperCAmelCase__( self : Optional[int] )-> Any:
lowerCAmelCase__ : List[str] = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Optional[Any] = self.default_image_processor
lowerCAmelCase__ : int = prepare_img()
lowerCAmelCase__ : Any = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowerCAmelCase__ : Dict = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
lowerCAmelCase__ : str = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Optional[Any] = torch.tensor([[-2.1_703E00, 2.1_107E00, -2.0_811E00]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 131 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class _a ( _lowercase):
def UpperCAmelCase__( self : int )-> Union[str, Any]:
lowerCAmelCase__ : Optional[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , '''num_attention_heads''' ) )
self.parent.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , '''num_encoder_blocks''' ) )
class _a :
def __init__( self : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Any=13 , _SCREAMING_SNAKE_CASE : List[Any]=64 , _SCREAMING_SNAKE_CASE : str=3 , _SCREAMING_SNAKE_CASE : Union[str, Any]=4 , _SCREAMING_SNAKE_CASE : Optional[int]=[2, 2, 2, 2] , _SCREAMING_SNAKE_CASE : Tuple=[8, 4, 2, 1] , _SCREAMING_SNAKE_CASE : Dict=[16, 32, 64, 128] , _SCREAMING_SNAKE_CASE : Dict=[1, 4, 8, 16] , _SCREAMING_SNAKE_CASE : str=[1, 2, 4, 8] , _SCREAMING_SNAKE_CASE : Dict=True , _SCREAMING_SNAKE_CASE : List[Any]=True , _SCREAMING_SNAKE_CASE : Tuple="gelu" , _SCREAMING_SNAKE_CASE : str=0.1 , _SCREAMING_SNAKE_CASE : List[str]=0.1 , _SCREAMING_SNAKE_CASE : List[Any]=0.02 , _SCREAMING_SNAKE_CASE : Any=3 , _SCREAMING_SNAKE_CASE : Optional[int]=None , )-> List[str]:
lowerCAmelCase__ : int = parent
lowerCAmelCase__ : Dict = batch_size
lowerCAmelCase__ : Dict = image_size
lowerCAmelCase__ : Union[str, Any] = num_channels
lowerCAmelCase__ : Optional[Any] = num_encoder_blocks
lowerCAmelCase__ : Union[str, Any] = sr_ratios
lowerCAmelCase__ : int = depths
lowerCAmelCase__ : Optional[int] = hidden_sizes
lowerCAmelCase__ : Optional[Any] = downsampling_rates
lowerCAmelCase__ : Tuple = num_attention_heads
lowerCAmelCase__ : Dict = is_training
lowerCAmelCase__ : Optional[int] = use_labels
lowerCAmelCase__ : Tuple = hidden_act
lowerCAmelCase__ : Tuple = hidden_dropout_prob
lowerCAmelCase__ : List[Any] = attention_probs_dropout_prob
lowerCAmelCase__ : Optional[Any] = initializer_range
lowerCAmelCase__ : List[str] = num_labels
lowerCAmelCase__ : Union[str, Any] = scope
def UpperCAmelCase__( self : Tuple )-> Optional[Any]:
lowerCAmelCase__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ : Optional[int] = None
if self.use_labels:
lowerCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCAmelCase__ : Any = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__( self : List[str] )-> Optional[int]:
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def UpperCAmelCase__( self : List[Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Union[str, Any] )-> Any:
lowerCAmelCase__ : Union[str, Any] = SegformerModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase__ : Optional[int] = model(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Any = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def UpperCAmelCase__( self : str , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] )-> Any:
lowerCAmelCase__ : Optional[Any] = self.num_labels
lowerCAmelCase__ : Tuple = SegformerForSemanticSegmentation(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase__ : Dict = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
lowerCAmelCase__ : Tuple = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def UpperCAmelCase__( self : List[Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : int )-> Tuple:
lowerCAmelCase__ : Union[str, Any] = 1
lowerCAmelCase__ : Tuple = SegformerForSemanticSegmentation(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase__ : Any = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Tuple = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertGreater(result.loss , 0.0 )
def UpperCAmelCase__( self : Union[str, Any] )-> List[str]:
lowerCAmelCase__ : Tuple = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = config_and_inputs
lowerCAmelCase__ : List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _a ( _lowercase , _lowercase , unittest.TestCase):
_a : Dict = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
_a : Any = (
{
'''feature-extraction''': SegformerModel,
'''image-classification''': SegformerForImageClassification,
'''image-segmentation''': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_a : List[Any] = True
_a : int = False
_a : List[str] = False
_a : Union[str, Any] = False
def UpperCAmelCase__( self : Optional[int] )-> Dict:
lowerCAmelCase__ : List[Any] = SegformerModelTester(self )
lowerCAmelCase__ : Optional[Any] = SegformerConfigTester(self , config_class=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Tuple )-> Optional[int]:
self.config_tester.run_common_tests()
def UpperCAmelCase__( self : Optional[int] )-> Any:
lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Any )-> Dict:
lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : List[str] )-> Tuple:
lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*_SCREAMING_SNAKE_CASE )
@unittest.skip('''SegFormer does not use inputs_embeds''' )
def UpperCAmelCase__( self : int )-> Dict:
pass
@unittest.skip('''SegFormer does not have get_input_embeddings method and get_output_embeddings methods''' )
def UpperCAmelCase__( self : str )-> str:
pass
def UpperCAmelCase__( self : str )-> Any:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Any = model_class(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ : int = [*signature.parameters.keys()]
lowerCAmelCase__ : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : List[str] )-> Dict:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 1, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
def UpperCAmelCase__( self : List[str] )-> List[Any]:
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))
    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
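# A minimal standalone sketch (added) of the post-processing path exercised above.
# post_process_semantic_segmentation upsamples the logits to each target size and
# argmaxes over the class dimension; the checkpoint name matches the tests, and the
# local `image` variable is an assumption:
#
#   from transformers import SegformerImageProcessor, SegformerForSemanticSegmentation
#
#   processor = SegformerImageProcessor()
#   model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
#   inputs = processor(images=image, return_tensors="pt")
#   outputs = model(**inputs)
#   # one (height, width) LongTensor of class ids per input image
#   seg_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]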
| 131 | 1 |
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False
    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("ESMfold does not output hidden states in the normal way.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
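# Note (added): the leading dimension of 8 in the shape checks above appears to index
# the structure module's per-iteration outputs (the last slice being the final
# prediction), and the 14 is the atom14 side-chain representation. Recent transformers
# releases also expose a PDB serializer on the model -- treat the exact API as an
# assumption:
#
#   with torch.no_grad():
#       output = model(input_ids)
#   pdb_string = model.output_to_pdb(output)[0]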
| 203 | """simple docstring"""
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list) -> Node:
    """Creates a Linked List from the elements of the given sequence
    (list/tuple) and returns the head of the Linked List."""
    if not elements_list:
        raise Exception("The Elements List is empty")

    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node | None) -> None:
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main() -> None:
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)
if __name__ == "__main__":
main()
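# A minimal iterative variant (added): print_reverse above recurses once per node,
# so very long lists can exceed Python's default recursion limit (~1000 frames).
# This sketch prints the same output without recursion, assuming the Node class above:
def print_reverse_iterative(head_node: Node | None) -> None:
    stack = []
    while head_node:
        stack.append(head_node.data)
        head_node = head_node.next
    for data in reversed(stack):
        print(data)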
| 203 | 1 |
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Preprocesses `dataset_path` by removing the remote filesystem prefix (e.g. ``s3://``)."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Checks if `fs` is a remote filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """
    Clear the references to fsspec's event loop and IO thread; otherwise
    HTTPFileSystem can hang in ML training loops (fsspec >= 0.9.0).
    """
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
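# A quick usage sketch (added), assuming this module is imported from within the
# `datasets` package (the relative imports above require it):
#
#   import fsspec
#
#   extract_path_from_uri("s3://my-bucket/data/train")  # -> "my-bucket/data/train"
#   extract_path_from_uri("/local/data/train")          # -> "/local/data/train"
#   is_remote_filesystem(fsspec.filesystem("file"))     # -> False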
| 177 | """simple docstring"""
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(digit_len: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(digit_len):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
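
    # Worked check (added): the four non-trivial digit-cancelling fractions are
    # 16/64, 19/95, 26/65 and 49/98; their product is 1/100, so solution() is 100.
    assert sorted(fraction_list(2)) == ["16/64", "19/95", "26/65", "49/98"]
    assert solution() == 100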
| 177 | 1 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 271 |
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """
    Cleans the table of contents of the documentation by removing duplicates and sorting entries alphabetically.
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicate keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("The table of content has two 'overview' docs, which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc
def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
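# A small worked example (added) of clean_doc_toc on a hypothetical list:
#
#   toc = [
#       {"local": "overview", "title": "Overview"},
#       {"local": "ddpm", "title": "DDPM"},
#       {"local": "ddim", "title": "DDIM"},
#       {"local": "ddim", "title": "DDIM"},  # duplicate entry with the same title
#   ]
#   clean_doc_toc(toc)
#   # -> Overview first, then DDIM and DDPM sorted by title, with the duplicate collapsed.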
| 271 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )
class TFDebertaV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
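# Note (added): tf.debugging.assert_near raises InvalidArgumentError when any element
# differs by more than atol, so the integration check above passes silently when the
# 3x3x3 slice of the last hidden state matches the reference values.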
| 105 |
import re


def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
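
    # Quick checks (added): the pattern accepts an optional +91/91/0 prefix and
    # requires a ten-digit subscriber number starting with 7, 8 or 9.
    assert indian_phone_validator("9876543210")
    assert not indian_phone_validator("+911234567890")  # starts with 1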
| 342 | 0 |
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Finds the index of char in pattern, scanning from the right; -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Returns the text index of the rightmost mismatch for the window at current_pos, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns the index positions of all matches
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = "ABAABA"
pattern = "AB"

bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
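# Note (added): this variant implements only the bad-character heuristic and, as
# written, still advances the search window one position at a time -- the computed
# shift is assigned to the loop variable, which the `for` statement immediately
# overwrites (as the lgtm comment hints) -- so it demonstrates correctness rather
# than Boyer-Moore's sublinear skipping. For the module-level example,
# BoyerMooreSearch("ABAABA", "AB") reports matches at positions [0, 3].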
| 76 |
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator
class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected character if it exists."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register_to_key_handler(cls):
    """Adds the KeyHandler metaclass to the class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
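# A minimal usage sketch (added): a class passed through register_to_key_handler
# gains a key_handler table mapping key codes to its @mark'ed methods, plus a
# handle_input dispatcher. The Menu class below is hypothetical:
#
#   @register_to_key_handler
#   class Menu:
#       @mark(KEYMAP["up"])
#       def move_up(self):
#           ...
#
#   Menu.handle_input(Menu)  # reads one key; calls move_up when "up" is pressed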
| 76 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
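# Note (added): _LazyModule installs itself in sys.modules in place of this module,
# so a submodule such as modeling_ibert is only imported the first time one of its
# attributes (e.g. IBertModel) is actually accessed.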
| 142 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """
    Copy/paste/tweak the original checkpoint's weights to our Table Transformer structure.
    """
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
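# Example invocation (added; the script filename and dump path are illustrative):
#
#   python convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection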
| 28 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
'''CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvNextForImageClassification''',
'''ConvNextModel''',
'''ConvNextPreTrainedModel''',
'''ConvNextBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
'''TFConvNextForImageClassification''',
'''TFConvNextModel''',
'''TFConvNextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 27 |
"""simple docstring"""
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : List[str] = SqueezeBertModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Any = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Union[str, Any] = SqueezeBertForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : List[Any] = SqueezeBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : str = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : str = self.num_labels
UpperCamelCase : Optional[Any] = SqueezeBertForSequenceClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Any = self.num_labels
UpperCamelCase : str = SqueezeBertForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Dict = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[int] = self.num_choices
UpperCamelCase : Tuple = SqueezeBertForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
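# replicate inputs across the choice dimension: (batch, seq_len) -> (batch, num_choices, seq_len)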
UpperCamelCase : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase : Tuple = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a_ ( self ):
UpperCamelCase : Optional[int] = self.prepare_config_and_inputs()
((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : Optional[int] = config_and_inputs
UpperCamelCase : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowercase : Dict = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase : Dict = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase : Dict = False
lowercase : str = True
lowercase : str = False
def a_ ( self ):
UpperCamelCase : Any = SqueezeBertModelTester(self )
UpperCamelCase : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , dim=37 )
def a_ ( self ):
self.config_tester.run_common_tests()
def a_ ( self ):
UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*SCREAMING_SNAKE_CASE_ )
def a_ ( self ):
UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def a_ ( self ):
UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*SCREAMING_SNAKE_CASE_ )
def a_ ( self ):
UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )
def a_ ( self ):
UpperCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*SCREAMING_SNAKE_CASE_ )
def a_ ( self ):
UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*SCREAMING_SNAKE_CASE_ )
@slow
def a_ ( self ):
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : Optional[Any] = SqueezeBertModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def a_ ( self ):
UpperCamelCase : Optional[Any] = SqueezeBertForSequenceClassification.from_pretrained("""squeezebert/squeezebert-mnli""" )
UpperCamelCase : Dict = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] )
UpperCamelCase : List[str] = model(SCREAMING_SNAKE_CASE_ )[0]
UpperCamelCase : Optional[Any] = torch.Size((1, 3) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 27 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
a :Optional[int] = logging.get_logger(__name__)
class __a (UpperCamelCase_):
'''simple docstring'''
def __init__( self , *_a , **_a ) -> None:
"""simple docstring"""
warnings.warn(
"""The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use MobileViTImageProcessor instead.""" , _a , )
super().__init__(*_a , **_a )
| 132 |
"""simple docstring"""
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class __a (unittest.TestCase):
'''simple docstring'''
def __init__( self , _a , _a = True , _a = None , _a = 32 , _a = True , _a = 1 / 255 , _a = True , _a = True , _a = [0.48_145_466, 0.4_578_275, 0.40_821_073] , _a = [0.26_862_954, 0.26_130_258, 0.27_577_711] , _a = True , _a=7 , _a=30 , _a=400 , _a=3 , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = parent
SCREAMING_SNAKE_CASE__ : Tuple = do_resize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else {"""shortest_edge""": 288}
SCREAMING_SNAKE_CASE__ : List[str] = size_divisor
SCREAMING_SNAKE_CASE__ : Tuple = do_rescale
SCREAMING_SNAKE_CASE__ : List[str] = rescale_factor
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_normalize
SCREAMING_SNAKE_CASE__ : List[str] = do_center_crop
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_mean
SCREAMING_SNAKE_CASE__ : List[str] = image_std
SCREAMING_SNAKE_CASE__ : List[str] = do_pad
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : int = num_channels
SCREAMING_SNAKE_CASE__ : Dict = min_resolution
SCREAMING_SNAKE_CASE__ : str = max_resolution
def _a ( self ) -> List[str]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def _a ( self , _a , _a=False ) -> int:
"""simple docstring"""
if not batched:
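# mirror the processor's resize: scale so the shorter edge equals `size`, cap the longer edge at (1333/800)*size, then round down to a multiple of size_divisor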
SCREAMING_SNAKE_CASE__ : List[Any] = self.size["""shortest_edge"""]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_inputs[0]
if isinstance(_a , Image.Image ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = image.size
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = image.shape[1], image.shape[2]
SCREAMING_SNAKE_CASE__ : Tuple = size / min(_a , _a )
if h < w:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = size, scale * w
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = scale * h, size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int((1_333 / 800) * size )
if max(_a , _a ) > max_size:
SCREAMING_SNAKE_CASE__ : List[str] = max_size / max(_a , _a )
SCREAMING_SNAKE_CASE__ : Any = newh * scale
SCREAMING_SNAKE_CASE__ : Any = neww * scale
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = int(newh + 0.5 ), int(neww + 0.5 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
SCREAMING_SNAKE_CASE__ : Dict = []
for image in image_inputs:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE__ : Any = max(_a , key=lambda _a : item[0] )[0]
SCREAMING_SNAKE_CASE__ : Any = max(_a , key=lambda _a : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __a (UpperCamelCase_ , unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[int] = BridgeTowerImageProcessor if is_vision_available() else None
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = BridgeTowerImageProcessingTester(self )
@property
def _a ( self ) -> Optional[int]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , """image_mean""" ) )
self.assertTrue(hasattr(_a , """image_std""" ) )
self.assertTrue(hasattr(_a , """do_normalize""" ) )
self.assertTrue(hasattr(_a , """do_resize""" ) )
self.assertTrue(hasattr(_a , """size""" ) )
self.assertTrue(hasattr(_a , """size_divisor""" ) )
def _a ( self ) -> List[str]:
"""simple docstring"""
pass
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.image_processor_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : List[str] = image_processing(_a , return_tensors="""pt""" ).pixel_values
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.image_processor_tester.get_expected_values(_a , batched=_a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _a ( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = self.image_processor_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(_a , return_tensors="""pt""" ).pixel_values
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = self.image_processor_tester.get_expected_values(_a , batched=_a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _a ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Dict = image_processing(_a , return_tensors="""pt""" ).pixel_values
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.image_processor_tester.get_expected_values(_a , batched=_a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 132 | 1 |
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class lowerCamelCase :
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=64 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=64 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=None , ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = parent
UpperCAmelCase__ : Optional[Any] = batch_size
UpperCAmelCase__ : Dict = seq_length
UpperCAmelCase__ : int = is_training
UpperCAmelCase__ : str = use_input_mask
UpperCAmelCase__ : Optional[Any] = use_token_type_ids
UpperCAmelCase__ : Optional[Any] = use_labels
UpperCAmelCase__ : Any = vocab_size
UpperCAmelCase__ : List[str] = hidden_size
UpperCAmelCase__ : int = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : List[Any] = intermediate_size
UpperCAmelCase__ : Union[str, Any] = hidden_act
UpperCAmelCase__ : Dict = hidden_dropout_prob
UpperCAmelCase__ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase__ : Any = max_position_embeddings
UpperCAmelCase__ : List[Any] = type_vocab_size
UpperCAmelCase__ : Dict = type_sequence_label_size
UpperCAmelCase__ : int = initializer_range
UpperCAmelCase__ : Dict = num_labels
UpperCAmelCase__ : Union[str, Any] = num_choices
UpperCAmelCase__ : Any = scope
def _a (self ):
"""simple docstring"""
return MPNetConfig.from_pretrained("""microsoft/mpnet-base""" )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : str = None
if self.use_input_mask:
UpperCAmelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : str = None
UpperCAmelCase__ : Optional[Any] = None
if self.use_labels:
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ : int = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ : Optional[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a (self ):
"""simple docstring"""
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = MPNetModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ : Tuple = model(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : int = MPNetForQuestionAnswering(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ : List[Any] = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , start_positions=_lowerCamelCase , end_positions=_lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : str = self.num_labels
UpperCAmelCase__ : Tuple = MPNetForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ : Any = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = self.num_choices
UpperCAmelCase__ : Tuple = MPNetForMultipleChoice(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : List[Any] = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = self.num_labels
UpperCAmelCase__ : List[str] = MPNetForTokenClassification(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ : Dict = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = self.prepare_config_and_inputs()
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : List[Any] = config_and_inputs
UpperCAmelCase__ : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE = (
{
'feature-extraction': MPNetModel,
'fill-mask': MPNetForMaskedLM,
'question-answering': MPNetForQuestionAnswering,
'text-classification': MPNetForSequenceClassification,
'token-classification': MPNetForTokenClassification,
'zero-shot': MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = True
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Dict = MPNetModelTester(self )
UpperCAmelCase__ : Dict = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37 )
def _a (self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*_lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*_lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*_lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*_lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*_lowerCamelCase )
@require_torch
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = MPNetModel.from_pretrained("""microsoft/mpnet-base""" )
UpperCAmelCase__ : int = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
UpperCAmelCase__ : Optional[int] = model(_lowerCamelCase )[0]
UpperCAmelCase__ : Any = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _lowerCamelCase )
UpperCAmelCase__ : Tuple = torch.tensor(
[[[-0.0_550, 0.1_943, -0.0_740], [-0.0_562, 0.2_211, -0.0_579], [-0.0_437, 0.3_337, -0.0_641]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCamelCase , atol=1e-4 ) )
| 166 |
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
_A = """__DUMMY_TRANSFORMERS_USER__"""
_A = """Dummy User"""
_A = """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"""
_A = """https://hub-ci.huggingface.co"""
_A = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}"""
_A = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}"""
_A = Path("""~/.huggingface/hub_ci_token""").expanduser()
@pytest.fixture
def a__ ( lowerCAmelCase ) -> Union[str, Any]:
monkeypatch.setattr(
"""huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , lowerCAmelCase )
@pytest.fixture
def a__ ( lowerCAmelCase ) -> List[Any]:
monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , lowerCAmelCase )
monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , lowerCAmelCase )
@pytest.fixture
def a__ ( lowerCAmelCase ) -> List[Any]:
monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , lowerCAmelCase )
@pytest.fixture
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> str:
HfFolder.save_token(lowerCAmelCase )
yield
HfFolder.delete_token()
@pytest.fixture(scope="""session""" )
def a__ ( ) -> List[str]:
return HfApi(endpoint=lowerCAmelCase )
@pytest.fixture(scope="""session""" )
def a__ ( lowerCAmelCase ) -> Union[str, Any]:
UpperCAmelCase__ : List[str] = HfFolder.get_token()
HfFolder.save_token(lowerCAmelCase )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(lowerCAmelCase )
@pytest.fixture
def a__ ( lowerCAmelCase ) -> List[str]:
def _cleanup_repo(lowerCAmelCase ):
hf_api.delete_repo(lowerCAmelCase , token=lowerCAmelCase , repo_type="""dataset""" )
return _cleanup_repo
@pytest.fixture
def a__ ( lowerCAmelCase ) -> Optional[Any]:
@contextmanager
def _temporary_repo(lowerCAmelCase ):
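# yield the repo id and guarantee cleanup even if the test body raises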
try:
yield repo_id
finally:
cleanup_repo(lowerCAmelCase )
return _temporary_repo
@pytest.fixture(scope="""session""" )
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Union[str, Any]:
UpperCAmelCase__ : str = F"""repo_txt_data-{int(time.time() * 10E3 )}"""
UpperCAmelCase__ : List[str] = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(lowerCAmelCase , token=lowerCAmelCase , repo_type="""dataset""" , private=lowerCAmelCase )
hf_api.upload_file(
token=lowerCAmelCase , path_or_fileobj=str(lowerCAmelCase ) , path_in_repo="""data/text_data.txt""" , repo_id=lowerCAmelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(lowerCAmelCase , token=lowerCAmelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> List[Any]:
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="""session""" )
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> int:
UpperCAmelCase__ : List[Any] = F"""repo_zipped_txt_data-{int(time.time() * 10E3 )}"""
UpperCAmelCase__ : Any = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(lowerCAmelCase , token=lowerCAmelCase , repo_type="""dataset""" , private=lowerCAmelCase )
hf_api.upload_file(
token=lowerCAmelCase , path_or_fileobj=str(lowerCAmelCase ) , path_in_repo="""data.zip""" , repo_id=lowerCAmelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(lowerCAmelCase , token=lowerCAmelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Dict:
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="""session""" )
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Tuple:
UpperCAmelCase__ : Union[str, Any] = F"""repo_zipped_img_data-{int(time.time() * 10E3 )}"""
UpperCAmelCase__ : Optional[int] = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(lowerCAmelCase , token=lowerCAmelCase , repo_type="""dataset""" , private=lowerCAmelCase )
hf_api.upload_file(
token=lowerCAmelCase , path_or_fileobj=str(lowerCAmelCase ) , path_in_repo="""data.zip""" , repo_id=lowerCAmelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(lowerCAmelCase , token=lowerCAmelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[Any]:
return hf_private_dataset_repo_zipped_img_data_
| 166 | 1 |
def __magic_name__ ( A : str ):
'''simple docstring'''
return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") )
def __magic_name__ ( A : str ):
'''simple docstring'''
a = credit_card_number
a = 0
a = len(A ) - 2
for i in range(A, -1, -2 ):
# double the value of every second digit
a = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
# i.e greater than 9(e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
a = cc_number[:i] + str(A ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(A ) - 1, -1, -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def __magic_name__ ( A : str ):
'''simple docstring'''
a = F"""{credit_card_number} is an invalid credit card number because"""
if not credit_card_number.isdigit():
print(F"""{error_message} it has nonnumerical characters.""" )
return False
if not 13 <= len(A ) <= 16:
print(F"""{error_message} of its length.""" )
return False
if not validate_initial_digits(A ):
print(F"""{error_message} of its first two digits.""" )
return False
if not luhn_validation(A ):
print(F"""{error_message} it fails the Luhn check.""" )
return False
print(F"""{credit_card_number} is a valid credit card number.""" )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
| 107 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
"""facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
"""facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
"""facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
"""facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
"""facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
"""facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class _lowerCAmelCase ( snake_case_ ):
__UpperCAmelCase : Optional[Any] = '''xmod'''
def __init__( self , UpperCamelCase__=3_0522 , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=2 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-12 , UpperCamelCase__=1 , UpperCamelCase__=0 , UpperCamelCase__=2 , UpperCamelCase__="absolute" , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=2 , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=("en_XX",) , UpperCamelCase__=None , **UpperCamelCase__ , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
snake_case : List[Any] = vocab_size
snake_case : List[Any] = hidden_size
snake_case : Union[str, Any] = num_hidden_layers
snake_case : Any = num_attention_heads
snake_case : List[str] = hidden_act
snake_case : Union[str, Any] = intermediate_size
snake_case : int = hidden_dropout_prob
snake_case : Any = attention_probs_dropout_prob
snake_case : Optional[int] = max_position_embeddings
snake_case : Tuple = type_vocab_size
snake_case : List[str] = initializer_range
snake_case : int = layer_norm_eps
snake_case : Optional[Any] = position_embedding_type
snake_case : int = use_cache
snake_case : Dict = classifier_dropout
snake_case : Dict = pre_norm
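# adapter-specific settings: X-MOD extends XLM-R with per-language adapter modules inside each transformer layer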
snake_case : Union[str, Any] = adapter_reduction_factor
snake_case : Any = adapter_layer_norm
snake_case : Optional[int] = adapter_reuse_layer_norm
snake_case : List[Any] = ln_before_adapter
snake_case : str = list(UpperCamelCase__ )
snake_case : int = default_language
class _lowerCAmelCase ( snake_case_ ):
@property
def lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
snake_case : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"}
else:
snake_case : Optional[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 203 | 0 |
"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> List[Any]:
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
if not is_sharded:
snake_case_ = os.path.abspath(_a )
logger.info(f"""Loading PyTorch weights from {pt_path}""" )
snake_case_ = torch.load(_a , map_location="""cpu""" )
logger.info(f"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" )
snake_case_ = convert_pytorch_state_dict_to_flax(_a , _a )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
snake_case_ = convert_pytorch_sharded_state_dict_to_flax(_a , _a )
return flax_state_dict
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Dict:
def is_key_or_prefix_key_in_dict(_SCREAMING_SNAKE_CASE ) -> bool:
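# True if the key, with or without the base-model prefix, already exists in the random Flax state dict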
return len(set(_a ) & {key, (model_prefix,) + key} ) > 0
# layer norm
snake_case_ = pt_tuple_key[:-1] + ("scale",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_a ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
snake_case_ = pt_tuple_key[:-1] + ("mean",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_a ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
snake_case_ = pt_tuple_key[:-1] + ("var",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_a ):
return renamed_pt_tuple_key, pt_tensor
# embedding
snake_case_ = pt_tuple_key[:-1] + ("embedding",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_a ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
snake_case_ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_a ):
snake_case_ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
snake_case_ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_a ):
snake_case_ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
snake_case_ = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
snake_case_ = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
snake_case_ = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
snake_case_ = pt_tuple_key[-2] + "_g"
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
snake_case_ = pt_tuple_key[-2] + "_v"
if name is not None:
snake_case_ = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
# convert PyTorch tensors to numpy arrays
snake_case_ = {k: v.numpy() for k, v in pt_state_dict.items()}
snake_case_ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
snake_case_ = flax_model.params["params"]
else:
snake_case_ = flax_model.params
snake_case_ = flatten_dict(_a )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
snake_case_ = flatten_dict(flax_model.params["""batch_stats"""] )
random_flax_state_dict.update(_a )
snake_case_ = {}
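# detect whether the checkpoint and the target model disagree on the base-model prefix (head checkpoint into a base model, or base checkpoint into a model with a head)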
snake_case_ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
snake_case_ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
snake_case_ = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
snake_case_ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
snake_case_ = pt_tuple_key[1:]
# Correctly rename weight parameters
snake_case_ = rename_key_and_reshape_tensor(
_a , _a , _a , _a )
# add model prefix if necessary
snake_case_ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
snake_case_ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
snake_case_ = jnp.asarray(_a )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_a , _a )
continue
# also add unexpected weight so that warning is thrown
snake_case_ = jnp.asarray(_a )
else:
# also add unexpected weight so that warning is thrown
snake_case_ = jnp.asarray(_a )
return unflatten_dict(_a )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
import torch
# accumulate the converted weights from every shard
snake_case_ = {}
for shard_file in shard_filenames:
# load the PyTorch shard with torch.load
snake_case_ = torch.load(_a )
snake_case_ = {k: v.numpy() for k, v in pt_state_dict.items()}
snake_case_ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
snake_case_ = flax_model.params["params"]
snake_case_ = flatten_dict(_a )
random_flax_state_dict.update(flatten_dict(flax_model.params["""batch_stats"""] ) )
else:
snake_case_ = flax_model.params
snake_case_ = flatten_dict(_a )
snake_case_ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
snake_case_ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
snake_case_ = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
snake_case_ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
snake_case_ = pt_tuple_key[1:]
# Correctly rename weight parameters
snake_case_ = rename_key_and_reshape_tensor(
_a , _a , _a , _a )
# add model prefix if necessary
snake_case_ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
snake_case_ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
snake_case_ = jnp.asarray(_a )
continue
if "var" in flax_key[-1]:
snake_case_ = jnp.asarray(_a )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_a , _a )
continue
# also add unexpected weight so that warning is thrown
snake_case_ = jnp.asarray(_a )
else:
# also add unexpected weight so that warning is thrown
snake_case_ = jnp.asarray(_a )
return unflatten_dict(_a )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
snake_case_ = os.path.abspath(_a )
logger.info(f"""Loading Flax weights from {flax_checkpoint_path}""" )
# import correct flax class
snake_case_ = getattr(_a , """Flax""" + model.__class__.__name__ )
# load flax weight dict
with open(_a , """rb""" ) as state_f:
try:
snake_case_ = from_bytes(_a , state_f.read() )
except UnpicklingError:
raise EnvironmentError(f"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """ )
return load_flax_weights_in_pytorch_model(_a , _a )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
snake_case_ = flatten_dict(jax.tree_util.tree_map(lambda _SCREAMING_SNAKE_CASE : x.dtype == jnp.bfloataa , _a ) ).values()
if any(_a ):
# convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
# and bf16 is not fully supported in PyTorch yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
snake_case_ = jax.tree_util.tree_map(
lambda _SCREAMING_SNAKE_CASE : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _a )
snake_case_ = flatten_dict(_a )
snake_case_ = pt_model.state_dict()
snake_case_ = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
snake_case_ = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
snake_case_ = []
snake_case_ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
snake_case_ = flax_key_tuple[0] == pt_model.base_model_prefix
snake_case_ = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
snake_case_ = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
snake_case_ = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_a ) not in pt_model_dict:
# conv layer
snake_case_ = flax_key_tuple[:-1] + ("weight",)
snake_case_ = jnp.transpose(_a , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_a ) not in pt_model_dict:
# linear layer
snake_case_ = flax_key_tuple[:-1] + ("weight",)
snake_case_ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
snake_case_ = flax_key_tuple[:-1] + ("weight",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
snake_case_ = flax_key_tuple[:-1] + ("running_mean",)
elif "var" in flax_key_tuple[-1]:
snake_case_ = flax_key_tuple[:-1] + ("running_var",)
if "batch_stats" in flax_state:
snake_case_ = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
snake_case_ = ".".join(_a )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
snake_case_ = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
snake_case_ = key.split(""".""" )
snake_case_ = None
if key_components[-3::2] == ["parametrizations", "original0"]:
snake_case_ = key_components[-2] + "_g"
elif key_components[-3::2] == ["parametrizations", "original1"]:
snake_case_ = key_components[-2] + "_v"
if name is not None:
snake_case_ = key_components[:-3] + [name]
snake_case_ = ".".join(_a )
snake_case_ = key
if flax_key in special_pt_names:
snake_case_ = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
snake_case_ = np.asarray(_a ) if not isinstance(_a , np.ndarray ) else flax_tensor
snake_case_ = torch.from_numpy(_a )
# remove from missing keys
missing_keys.remove(_a )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_a )
pt_model.load_state_dict(_a )
# re-transform missing_keys to list
snake_case_ = list(_a )
if len(_a ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
else:
logger.warning(f"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" )
if len(_a ) > 0:
logger.warning(
f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
""" use it for predictions and inference.""" )
else:
logger.warning(
f"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"""
"""If your task is similar to the task the model of the checkpoint was trained on, """
f"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" )
return pt_model
| 364 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def _a ( _SCREAMING_SNAKE_CASE = 8 ) -> str:
snake_case_ = ascii_letters + digits + punctuation
return "".join(secrets.choice(_SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE ) )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
# Build the password from the required characters plus random letters,
# digits, and punctuation drawn from the helper functions below.
i -= len(_SCREAMING_SNAKE_CASE )
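# after reserving room for the required characters, split the rest into thirds; letters absorb the remainder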
snake_case_ = i // 3
snake_case_ = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
snake_case_ = (
chars_incl
+ random(_SCREAMING_SNAKE_CASE , quotient + remainder )
+ random(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
+ random(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
)
snake_case_ = list(_SCREAMING_SNAKE_CASE )
shuffle(_SCREAMING_SNAKE_CASE )
return "".join(_SCREAMING_SNAKE_CASE )
# random is a generalised function for letters, characters and numbers
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
return "".join(secrets.choice(_SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE ) )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
pass # Put your code here...
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
pass # Put your code here...
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
pass # Put your code here...
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 8 ) -> bool:
if len(_SCREAMING_SNAKE_CASE ) < min_length:
# the password must be at least min_length characters long
return False
snake_case_ = any(char in ascii_uppercase for char in password )
snake_case_ = any(char in ascii_lowercase for char in password )
snake_case_ = any(char in digits for char in password )
snake_case_ = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain uppercase and lowercase letters,
# numbers, and special characters
def _a ( ) -> Union[str, Any]:
snake_case_ = int(input("""Please indicate the max length of your password: """ ).strip() )
snake_case_ = input(
"""Please indicate the characters that must be in your password: """ ).strip()
print("""Password generated:""" , password_generator(_SCREAMING_SNAKE_CASE ) )
print(
"""Alternative Password generated:""" , alternative_password_generator(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , )
print("""[If you are thinking of using this passsword, You better save it.]""" )
if __name__ == "__main__":
main()
| 233 | 0 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__snake_case : Any = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
__snake_case : str = (
subprocess.check_output(F"""git diff --diff-filter=d --name-only {fork_point_sha}""".split()).decode("""utf-8""").split()
)
__snake_case : Optional[Any] = """|""".join(sys.argv[1:])
__snake_case : Optional[int] = re.compile(rF"""^({joined_dirs}).*?\.py$""")
__snake_case : List[Any] = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 248 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class A__(a_ ):
"""simple docstring"""
def __init__( self , *_lowercase , _lowercase=None , _lowercase=None , **_lowercase ) -> Optional[Any]:
super().__init__(*_lowercase , **_lowercase )
a_ : Optional[int] = eval_examples
a_ : Tuple = post_process_function
def UpperCamelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase = "eval" ) -> Union[str, Any]:
a_ : List[str] = self.eval_dataset if eval_dataset is None else eval_dataset
a_ : List[str] = self.get_eval_dataloader(_lowercase )
a_ : List[Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
a_ : Optional[int] = self.compute_metrics
a_ : List[str] = None
a_ : Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
a_ : Any = time.time()
try:
a_ : Union[str, Any] = eval_loop(
_lowercase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowercase , metric_key_prefix=_lowercase , )
finally:
a_ : Dict = compute_metrics
a_ : Union[str, Any] = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
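# exclude JIT compilation time from the wall clock so speed metrics reflect steady-state throughput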
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
_lowercase , _lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
a_ : List[Any] = self.post_process_function(_lowercase , _lowercase , output.predictions )
a_ : Optional[Any] = self.compute_metrics(_lowercase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
a_ : List[str] = metrics.pop(_lowercase )
metrics.update(output.metrics )
else:
a_ : List[Any] = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(_lowercase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
a_ : List[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , _lowercase )
return metrics
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase=None , _lowercase = "test" ) -> str:
a_ : Tuple = self.get_test_dataloader(_lowercase )
# Temporarily disable metric computation, we will do it in the loop here.
a_ : List[Any] = self.compute_metrics
a_ : int = None
a_ : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
a_ : Union[str, Any] = time.time()
try:
a_ : List[str] = eval_loop(
_lowercase , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowercase , metric_key_prefix=_lowercase , )
finally:
a_ : Optional[Any] = compute_metrics
a_ : Union[str, Any] = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
_lowercase , _lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
a_ : Optional[int] = self.post_process_function(_lowercase , _lowercase , output.predictions , """predict""" )
a_ : List[Any] = self.compute_metrics(_lowercase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
a_ : int = metrics.pop(_lowercase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_lowercase )
| 248 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """spiece.model"""}
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""TsinghuaAI/CPM-Generate""": """https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model""",
}
}
class __lowerCamelCase ( lowercase__ ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<sep>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<cls>" , UpperCAmelCase="<mask>" , UpperCAmelCase=["<eop>", "<eod>"] , UpperCAmelCase = None , **UpperCAmelCase , ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token
lowercase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowercase_ , remove_space=lowercase_ , keep_accents=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
lowercase_ = 3
lowercase_ = do_lower_case
lowercase_ = remove_space
lowercase_ = keep_accents
lowercase_ = vocab_file
lowercase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase_ )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
"See https://pypi.org/project/jieba/ for installation." )
lowercase_ = jieba
lowercase_ = str.maketrans(" \n" , "\u2582\u2583" )
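# spaces and newlines are swapped for U+2582/U+2583 before SentencePiece; _decode reverses the mapping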
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def A__ ( self ) -> List[Any]:
'''simple docstring'''
return len(self.sp_model )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = self.__dict__.copy()
lowercase_ = None
return state
def __setstate__( self , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowercase_ = {}
lowercase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
    def preprocess_text(self, inputs) -> str:
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string with SentencePiece, re-splitting trailing digit-comma pieces."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens) -> str:
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def _decode(self, *args, **kwargs) -> str:
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
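
# Hedged usage sketch (an illustrative addition, not part of the original file).
# Requires `jieba` plus access to the checkpoint listed in
# PRETRAINED_VOCAB_FILES_MAP above:
#
#     tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#     ids = tokenizer("今天天气真好")["input_ids"]
#     print(tokenizer.decode(ids))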
| 351 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """Output of `TransformerTemporalModel`: the transformed hidden states."""

    sample: torch.FloatTensor
class TransformerTemporalModel(ModelMixin, ConfigMixin):
    """A transformer model for video-like data that attends over the frame dimension."""

    @register_to_config
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, activation_fn: str = "geglu", norm_elementwise_affine: bool = True, double_self_attention: bool = True):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine, )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)
    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict: bool = True):
        # 1. Input
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels, )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, num_frames, channel)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
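
# Hedged smoke test (an illustrative addition, not part of the original module):
# pushes a tiny random batch through the temporal transformer. `in_channels`
# must be divisible by the GroupNorm's `norm_num_groups` (32 by default).
if __name__ == "__main__":
    model = TransformerTemporalModel(num_attention_heads=4, attention_head_dim=8, in_channels=32, num_layers=1)
    frames = torch.randn(2 * 4, 32, 8, 8)  # (batch * num_frames, channels, height, width)
    out = model(frames, num_frames=4).sample
    print(out.shape)  # expected: torch.Size([8, 32, 8, 8])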
| 297 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, projection_dim=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
| 161 |
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
"word_embeddings_layernorm.weight",
"word_embeddings_layernorm.bias",
"input_layernorm.weight",
"input_layernorm.bias",
"post_attention_layernorm.weight",
"post_attention_layernorm.bias",
"self_attention.dense.bias",
"mlp.dense_4h_to_h.bias",
"ln_f.weight",
"ln_f.bias",
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
"mlp.dense_4h_to_h.weight",
"self_attention.dense.weight",
]
def layer_name_mapping(key, file):
    """Map a Megatron-DeepSpeed layer name to the corresponding transformers name."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
def get_dtype_size(dtype):
    """Return the storage size in bytes of a single element of `dtype`."""
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
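
# Hedged sanity checks (illustrative, not from the original script):
#   get_dtype_size(torch.float32) -> 4, get_dtype_size(torch.float16) -> 2,
#   get_dtype_size(torch.bool) -> 0.125 (bools are counted as one bit each).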
def convert_bloom_checkpoint_to_pytorch(bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None
        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))
        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
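
# Hedged CLI sketch (illustrative; the script filename and paths are placeholders):
#
#     python convert_bloom_checkpoint.py \
#         --bloom_checkpoint_path /path/to/megatron_checkpoints \
#         --pytorch_dump_folder_path /path/to/output \
#         --shard_model \
#         --pretraining_tp 4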
if __name__ == "__main__":
a__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bloom_checkpoint_path",
default=None,
type=str,
required=True,
help="Path to the Megatron-LM checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--bloom_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--shard_model",
action="store_true",
help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
)
parser.add_argument(
"--pretraining_tp",
default=4,
type=int,
help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
)
a__ : Tuple = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 161 | 1 |
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __snake_case :
pass
| 357 | """simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(self, complete_file_name, parser_only, secondary_filename=None, special_strings=None):
        """Tests a single `complete_*` example against all of the implemented `by_feature` scripts."""
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name, feature_script=item, tested_section="main()" if parser_only else "training_function()", ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename)
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")
    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)
    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clean_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
    def test_checkpointing_by_epoch(self):
        testargs = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}\n '.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}\n '.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f'\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
| 203 | 0 |
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(search_prob, find_max: bool = True, max_x: float = math.inf, min_x: float = -math.inf, max_y: float = math.inf, min_y: float = -math.inf, visualization: bool = False, start_temperate: float = 100, rate_of_decrease: float = 0.01, threshold_temp: float = 1) -> Any:
    """Hill climbing with a decreasing temperature that occasionally accepts worse neighbors."""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score

            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds

            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
if __name__ == "__main__":
    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
| 203 |
"""simple docstring"""
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write an Accelerate config; will pick up CPU, GPU, and multi-GPU setups.
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '.split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16")
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '.split()
        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))
    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))
    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))
    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))
    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '.split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16")
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
| 203 | 1 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 363 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    """Wraps a Speech2Text feature extractor and a Speech2Text tokenizer into a single processor."""

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
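
# Hedged usage sketch (illustrative, not part of the original module); assumes
# the public "facebook/s2t-small-librispeech-asr" checkpoint is reachable:
#
#     import numpy as np
#
#     processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#     speech = np.zeros(16000, dtype=np.float32)  # one second of silence as a stand-in waveform
#     inputs = processor(audio=speech, sampling_rate=16000, text="HELLO", return_tensors="pt")
#     # `inputs` then holds `input_features` from the feature extractor and `labels` from the tokenizer.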
| 221 | 0 |
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """Return both roots of a * x^2 + b * x + c = 0, as reals when the imaginary part vanishes."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
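
# Hedged check (illustrative, not from the original file): for a=5, b=6, c=1 the
# discriminant is 36 - 20 = 16, so the roots are (-6 ± 4) / 10, i.e. -0.2 and
# -1.0, and main() prints "The solutions are: -0.2 and -1.0".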
| 271 |
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Interpolate the polynomial through (x_points, y_points) at x0 using Neville's scheme.

    Returns the interpolated value together with the full table of intermediate values.
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
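
    # Hedged usage sketch (illustrative, not from the original file): sample
    # y = x^2 at x = 1..4 and evaluate at x = 2.5; polynomial interpolation
    # reproduces the quadratic exactly, so this prints 6.25.
    value, _table = neville_interpolate([1, 2, 3, 4], [1, 4, 9, 16], 2.5)
    print(value)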
| 271 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Copy/paste/tweak the fairseq model's weights into the transformers design."""
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model, hf_unispeech, is_finetuned)
    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
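# For reference, a hypothetical programmatic invocation of the converter above;
# both paths are placeholders, not values from the original script (this is
# equivalent to running the CLI with --not_finetuned):
#     convert_unispeech_checkpoint(
#         checkpoint_path="/path/to/unispeech_checkpoint.pt",
#         pytorch_dump_folder_path="/path/to/output_dir",
#         is_finetuned=False,
#     )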
| 360 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_bigbird_pegasus""": [
"""BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BigBirdPegasusConfig""",
"""BigBirdPegasusOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
"""BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BigBirdPegasusForCausalLM""",
"""BigBirdPegasusForConditionalGeneration""",
"""BigBirdPegasusForQuestionAnswering""",
"""BigBirdPegasusForSequenceClassification""",
"""BigBirdPegasusModel""",
"""BigBirdPegasusPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
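# Hedged illustration of what the _LazyModule registration above buys:
# config classes resolve without importing torch, and the heavy modeling
# module is only imported on first attribute access. Example, assuming
# transformers is installed:
#     from transformers import BigBirdPegasusConfig
#     config = BigBirdPegasusConfig()  # no torch import triggered yet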
| 45 | 0 |
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = "base_with_context"
def load_notes_encoder(weights, model):
SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"]) , requires_grad=_a)
for lyr_num, lyr in enumerate(model.encoders):
SCREAMING_SNAKE_CASE : List[Any] = weights[f"layers_{lyr_num}"]
SCREAMING_SNAKE_CASE : Any = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"]))
SCREAMING_SNAKE_CASE : Tuple = ly_weight["attention"]
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
SCREAMING_SNAKE_CASE : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
return model
def load_continuous_encoder(weights, model):
SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
SCREAMING_SNAKE_CASE : Any = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"]) , requires_grad=_a)
for lyr_num, lyr in enumerate(model.encoders):
SCREAMING_SNAKE_CASE : Optional[Any] = weights[f"layers_{lyr_num}"]
SCREAMING_SNAKE_CASE : Union[str, Any] = ly_weight["attention"]
SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
SCREAMING_SNAKE_CASE : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
SCREAMING_SNAKE_CASE : int = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"]))
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
SCREAMING_SNAKE_CASE : str = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
SCREAMING_SNAKE_CASE : str = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
return model
def load_decoder(weights, model):
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))
SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"]) , requires_grad=_a)
SCREAMING_SNAKE_CASE : Dict = nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T))
for lyr_num, lyr in enumerate(model.decoders):
SCREAMING_SNAKE_CASE : Optional[int] = weights[f"layers_{lyr_num}"]
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"]))
SCREAMING_SNAKE_CASE : Any = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T))
SCREAMING_SNAKE_CASE : Optional[int] = ly_weight["self_attention"]
SCREAMING_SNAKE_CASE : str = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
SCREAMING_SNAKE_CASE : List[Any] = ly_weight["MultiHeadDotProductAttention_0"]
SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
SCREAMING_SNAKE_CASE : str = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
SCREAMING_SNAKE_CASE : Dict = nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"]))
SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
SCREAMING_SNAKE_CASE : str = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T))
SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))
return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)
    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]
    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)
    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate, )
    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)
    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan, )
    if args.save:
        pipe.save_pretrained(args.output_path)
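
# Hedged aside on the repeated `.T` in the loaders above (shapes here are made
# up): Flax Dense kernels are saved as (in_features, out_features), while
# torch.nn.Linear stores (out_features, in_features), hence the transpose when
# porting. A minimal sketch:
#     flax_kernel = onp.random.randn(16, 32).astype(onp.float32)      # (in, out)
#     linear = nn.Linear(16, 32)
#     linear.weight = nn.Parameter(torch.FloatTensor(flax_kernel.T))  # (out, in)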
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument(
'--checkpoint_path',
default=F'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help='Path to the original jax model checkpoint.',
)
    args = parser.parse_args()
    main(args)
 | 76 |
from collections.abc import Callable
import numpy as np
def explicit_euler(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float):
    """Solve dy/dx = ode_func(x, y) with the explicit (forward) Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y
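
# Hedged check of explicit_euler (not in the original file): for dy/dx = y
# with y(0) = 1, the value at x = 1 approaches e as the step size shrinks.
#     y = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
#     print(y[-1])  # ~2.7048 with h = 0.01, vs the exact e ~ 2.7183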
if __name__ == "__main__":
import doctest
    doctest.testmod()
 | 76 | 1 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
},
"tokenizer_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"t5-small": 512,
"t5-base": 512,
"t5-large": 512,
"t5-3b": 512,
"t5-11b": 512,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" T5 tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer

    prefix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=100, additional_special_tokens=None, **kwargs):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens")
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self.extra_ids = extra_ids
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , __a , )
return max_model_length
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")
return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: re.search(r"<extra_id_\d+>", token) is not None, self.additional_special_tokens)))

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
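
# Hedged usage sketch for the sentinel helpers above ("t5-small" is the public
# checkpoint; list order is not guaranteed because a set is used internally):
#     from transformers import T5TokenizerFast
#     tok = T5TokenizerFast.from_pretrained("t5-small")
#     assert len(tok.get_sentinel_tokens()) == len(tok.get_sentinel_token_ids()) == 100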
| 310 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
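
# Hedged usage sketch of the pipeline re-exported here (Karlo is the public
# UnCLIP checkpoint; generation arguments simplified):
#     from diffusers import UnCLIPPipeline
#     pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha")
#     image = pipe("a photograph of a red panda").images[0]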
| 310 | 1 |
'''simple docstring'''
import argparse
import struct
import unittest
class SHA256:
    def __init__(self, data):
        """Compute the SHA-256 hash of the given bytestring."""
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6A_09_E6_67,
0xBB_67_AE_85,
0x3C_6E_F3_72,
0xA5_4F_F5_3A,
0x51_0E_52_7F,
0x9B_05_68_8C,
0x1F_83_D9_AB,
0x5B_E0_CD_19,
]
# Initialize round constants
        self.round_constants = [
0x42_8A_2F_98,
0x71_37_44_91,
0xB5_C0_FB_CF,
0xE9_B5_DB_A5,
0x39_56_C2_5B,
0x59_F1_11_F1,
0x92_3F_82_A4,
0xAB_1C_5E_D5,
0xD8_07_AA_98,
0x12_83_5B_01,
0x24_31_85_BE,
0x55_0C_7D_C3,
0x72_BE_5D_74,
0x80_DE_B1_FE,
0x9B_DC_06_A7,
0xC1_9B_F1_74,
0xE4_9B_69_C1,
0xEF_BE_47_86,
0x0F_C1_9D_C6,
0x24_0C_A1_CC,
0x2D_E9_2C_6F,
0x4A_74_84_AA,
0x5C_B0_A9_DC,
0x76_F9_88_DA,
0x98_3E_51_52,
0xA8_31_C6_6D,
0xB0_03_27_C8,
0xBF_59_7F_C7,
0xC6_E0_0B_F3,
0xD5_A7_91_47,
0x06_CA_63_51,
0x14_29_29_67,
0x27_B7_0A_85,
0x2E_1B_21_38,
0x4D_2C_6D_FC,
0x53_38_0D_13,
0x65_0A_73_54,
0x76_6A_0A_BB,
0x81_C2_C9_2E,
0x92_72_2C_85,
0xA2_BF_E8_A1,
0xA8_1A_66_4B,
0xC2_4B_8B_70,
0xC7_6C_51_A3,
0xD1_92_E8_19,
0xD6_99_06_24,
0xF4_0E_35_85,
0x10_6A_A0_70,
0x19_A4_C1_16,
0x1E_37_6C_08,
0x27_48_77_4C,
0x34_B0_BC_B5,
0x39_1C_0C_B3,
0x4E_D8_AA_4A,
0x5B_9C_CA_4F,
0x68_2E_6F_F3,
0x74_8F_82_EE,
0x78_A5_63_6F,
0x84_C8_78_14,
0x8C_C7_02_08,
0x90_BE_FF_FA,
0xA4_50_6C_EB,
0xBE_F9_A3_F7,
0xC6_71_78_F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
self.final_hash()
@staticmethod
    def preprocessing(data):
        """Pad the message to a multiple of 64 bytes and append the bit length."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self):
        """Run the SHA-256 compression function over all 64-byte blocks."""
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_00_00_00_00
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_00_00_00_00
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_00_00_00_00
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_00_00_00_00),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_00_00_00_00),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self, value, rotations):
        """Right-rotate a 32-bit integer `value` by `rotations` bits."""
        return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self):
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main():
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s", "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
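    # Hedged cross-check of the pure-Python implementation against the
    # standard library ("abc" is an arbitrary test vector, not from the
    # original file):
    import hashlib as _hashlib
    assert SHA256(b"abc").hash == _hashlib.sha256(b"abc").hexdigest()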
| 27 |
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-0_8)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        """Return the last hidden states of the underlying BERT encoder."""
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        """Temperature-scaled softmax over cosine similarities."""
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Score each token as the start or end of an entity span."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        # embed the query and the support examples with the shared encoder
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
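
# Minimal hedged illustration of the Atten step above (shapes are made up;
# the real model applies it to BERT hidden states):
#     cos = torch.nn.CosineSimilarity(3, 1e-08)
#     softmax = torch.nn.Softmax(dim=1)
#     q_rep = torch.randn(2, 1, 5, 8)             # queries
#     s_rep = torch.randn(2, 7, 1, 8)             # supports
#     attention = softmax(1 * cos(q_rep, s_rep))  # -> shape (2, 7, 5)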
| 27 | 1 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class LevitModelTester:
def __init__( self : Tuple , lowerCamelCase__ : str , lowerCamelCase__ : Optional[Any]=13 , lowerCamelCase__ : Optional[int]=64 , lowerCamelCase__ : Dict=3 , lowerCamelCase__ : Any=3 , lowerCamelCase__ : int=2 , lowerCamelCase__ : Union[str, Any]=1 , lowerCamelCase__ : str=16 , lowerCamelCase__ : Dict=[128, 256, 384] , lowerCamelCase__ : str=[4, 6, 8] , lowerCamelCase__ : str=[2, 3, 4] , lowerCamelCase__ : Tuple=[16, 16, 16] , lowerCamelCase__ : str=0 , lowerCamelCase__ : List[Any]=[2, 2, 2] , lowerCamelCase__ : str=[2, 2, 2] , lowerCamelCase__ : Optional[Any]=0.02 , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : List[Any]=2 , ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = parent
UpperCamelCase__ : Optional[Any] = batch_size
UpperCamelCase__ : List[Any] = image_size
UpperCamelCase__ : List[str] = num_channels
UpperCamelCase__ : Tuple = kernel_size
UpperCamelCase__ : str = stride
UpperCamelCase__ : Optional[Any] = padding
UpperCamelCase__ : List[str] = hidden_sizes
UpperCamelCase__ : Union[str, Any] = num_attention_heads
UpperCamelCase__ : Any = depths
UpperCamelCase__ : int = key_dim
UpperCamelCase__ : Union[str, Any] = drop_path_rate
UpperCamelCase__ : Optional[Any] = patch_size
UpperCamelCase__ : Optional[int] = attention_ratio
UpperCamelCase__ : Any = mlp_ratio
UpperCamelCase__ : Dict = initializer_range
UpperCamelCase__ : Tuple = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
UpperCamelCase__ : Dict = is_training
UpperCamelCase__ : Optional[Any] = use_labels
UpperCamelCase__ : List[str] = num_labels
UpperCamelCase__ : Tuple = initializer_range
def UpperCAmelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ : Any = None
if self.use_labels:
UpperCamelCase__ : str = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase__ : List[str] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def UpperCAmelCase__ ( self : Dict , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Any ) -> Dict:
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = LevitModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ : Any = model(lowerCamelCase__ )
UpperCamelCase__ : int = (self.image_size, self.image_size)
UpperCamelCase__ , UpperCamelCase__ : Dict = image_size[0], image_size[1]
for _ in range(4 ):
UpperCamelCase__ : Optional[int] = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
UpperCamelCase__ : Optional[int] = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def UpperCAmelCase__ ( self : int , lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ : List[Any] = self.num_labels
UpperCamelCase__ : Union[str, Any] = LevitForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ : List[Any] = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ : Tuple = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : List[Any] = config_and_inputs
UpperCamelCase__ : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)
def UpperCAmelCase__ ( self : str ) -> List[Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase__ ( self : Tuple ) -> Any:
'''simple docstring'''
return
@unittest.skip(reason='''Levit does not use inputs_embeds''' )
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Levit does not support input and output embeddings''' )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Levit does not output attentions''' )
def UpperCAmelCase__ ( self : List[Any] ) -> Any:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[int] = model_class(lowerCamelCase__ )
UpperCamelCase__ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : Optional[int] = [*signature.parameters.keys()]
UpperCamelCase__ : Dict = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def UpperCAmelCase__ ( self : Any ) -> Dict:
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[Any] ):
UpperCamelCase__ : Dict = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
UpperCamelCase__ : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
UpperCamelCase__ : List[str] = outputs.hidden_states
UpperCamelCase__ : Optional[int] = len(self.model_tester.depths ) + 1
self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
UpperCamelCase__ : Any = (self.model_tester.image_size, self.model_tester.image_size)
UpperCamelCase__ , UpperCamelCase__ : Optional[int] = image_size[0], image_size[1]
for _ in range(4 ):
UpperCamelCase__ : str = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
UpperCamelCase__ : str = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
UpperCamelCase__ , UpperCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : str = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ : int = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : int , lowerCamelCase__ : List[str]=False ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ : Tuple = super()._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCAmelCase__ ( self : Any ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def UpperCAmelCase__ ( self : int ) -> int:
'''simple docstring'''
UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
def UpperCAmelCase__ ( self : int ) -> Tuple:
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCamelCase__ , UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ : int = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowerCamelCase__ )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
UpperCamelCase__ : Optional[int] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
UpperCamelCase__ : Dict = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ : Dict = model(**lowerCamelCase__ ).loss
loss.backward()
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCamelCase__ : Tuple = False
UpperCamelCase__ : List[Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(lowerCamelCase__ ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
UpperCamelCase__ : Any = model_class(lowerCamelCase__ )
model.gradient_checkpointing_enable()
model.to(lowerCamelCase__ )
model.train()
UpperCamelCase__ : Optional[int] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ : List[Any] = model(**lowerCamelCase__ ).loss
loss.backward()
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ : str = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowerCamelCase__ ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}" ):
UpperCamelCase__ : Optional[Any] = problem_type['''title''']
UpperCamelCase__ : List[Any] = problem_type['''num_labels''']
UpperCamelCase__ : List[Any] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
UpperCamelCase__ : Any = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
if problem_type["num_labels"] > 1:
UpperCamelCase__ : Dict = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
UpperCamelCase__ : Dict = inputs['''labels'''].to(problem_type['''dtype'''] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowerCamelCase__ ) as warning_list:
UpperCamelCase__ : Union[str, Any] = model(**lowerCamelCase__ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"Something is going wrong in the regression problem: intercepted {w.message}" )
loss.backward()
@slow
def UpperCAmelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : str = LevitModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
@cached_property
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def UpperCAmelCase__ ( self : Any ) -> str:
'''simple docstring'''
UpperCamelCase__ : Dict = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
lowerCamelCase__ )
UpperCamelCase__ : Union[str, Any] = self.default_image_processor
UpperCamelCase__ : Tuple = prepare_img()
UpperCamelCase__ : List[str] = image_processor(images=lowerCamelCase__ , return_tensors='''pt''' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
UpperCamelCase__ : List[Any] = model(**lowerCamelCase__ )
# verify the logits
UpperCamelCase__ : List[str] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
UpperCamelCase__ : List[str] = torch.tensor([1.0448, -0.3745, -1.8317] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
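
# Hedged aside on the resolution arithmetic repeated in these tests: with the
# tester defaults (image_size=64, kernel_size=3, stride=2, padding=1), the
# four stride-2 convolutions shrink the feature map 64 -> 32 -> 16 -> 8 -> 4:
#     size = 64
#     for _ in range(4):
#         size = floor((size + 2 * 1 - 3) / 2 + 1)
#     assert size == 4  # i.e. a 4 x 4 = 16-token sequence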
| 51 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
def _dump_articles(path: Path, articles: list):
    """Write one article per line to the given path."""
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
class TestAll(TestCasePlus):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def UpperCAmelCase__ ( self : Tuple , lowerCamelCase__ : Dict ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ : Dict = AutoTokenizer.from_pretrained(lowerCamelCase__ )
UpperCamelCase__ : Optional[int] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
UpperCamelCase__ : Tuple = max(len(tokenizer.encode(lowerCamelCase__ ) ) for a in ARTICLES )
UpperCamelCase__ : List[str] = max(len(tokenizer.encode(lowerCamelCase__ ) ) for a in SUMMARIES )
UpperCamelCase__ : int = 4
UpperCamelCase__ : Union[str, Any] = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = '''ro_RO''', '''de_DE''' # ignored for all but mbart, but never causes error.
UpperCamelCase__ : List[str] = SeqaSeqDataset(
lowerCamelCase__ , data_dir=lowerCamelCase__ , type_path='''train''' , max_source_length=lowerCamelCase__ , max_target_length=lowerCamelCase__ , src_lang=lowerCamelCase__ , tgt_lang=lowerCamelCase__ , )
UpperCamelCase__ : Dict = DataLoader(lowerCamelCase__ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
UpperCamelCase__ : Dict = shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def UpperCAmelCase__ ( self : int , lowerCamelCase__ : Tuple ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ : Dict = AutoTokenizer.from_pretrained(lowerCamelCase__ )
UpperCamelCase__ : str = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
UpperCamelCase__ : Union[str, Any] = max(len(tokenizer.encode(lowerCamelCase__ ) ) for a in ARTICLES )
UpperCamelCase__ : str = max(len(tokenizer.encode(lowerCamelCase__ ) ) for a in SUMMARIES )
UpperCamelCase__ : Union[str, Any] = 4
UpperCamelCase__ : Optional[int] = LegacySeqaSeqDataset(
lowerCamelCase__ , data_dir=lowerCamelCase__ , type_path='''train''' , max_source_length=20 , max_target_length=lowerCamelCase__ , )
UpperCamelCase__ : List[str] = DataLoader(lowerCamelCase__ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def UpperCAmelCase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
UpperCamelCase__ : Dict = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''' )
UpperCamelCase__ : List[str] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
UpperCamelCase__ : int = tmp_dir.joinpath('''train.source''' ).open().readlines()
UpperCamelCase__ : Tuple = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(lowerCamelCase__ , lowerCamelCase__ , 128 , lowerCamelCase__ )
UpperCamelCase__ : str = {x.name for x in tmp_dir.iterdir()}
UpperCamelCase__ : Optional[int] = {x.name for x in save_dir.iterdir()}
UpperCamelCase__ : Dict = save_dir.joinpath('''train.source''' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(lowerCamelCase__ ) < len(lowerCamelCase__ )
assert len(lowerCamelCase__ ) == 1
assert len(packed_examples[0] ) == sum(len(lowerCamelCase__ ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''' )
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
if not FAIRSEQ_AVAILABLE:
return
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Tuple = self._get_dataset(max_len=64 )
UpperCamelCase__ : List[str] = 64
UpperCamelCase__ : Optional[int] = ds.make_dynamic_sampler(lowerCamelCase__ , required_batch_size_multiple=lowerCamelCase__ )
UpperCamelCase__ : Union[str, Any] = [len(lowerCamelCase__ ) for x in batch_sampler]
assert len(set(lowerCamelCase__ ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(lowerCamelCase__ ) == len(lowerCamelCase__ ) # no dropped or added examples
UpperCamelCase__ : Any = DataLoader(lowerCamelCase__ , batch_sampler=lowerCamelCase__ , collate_fn=ds.collate_fn , num_workers=2 )
UpperCamelCase__ : int = []
UpperCamelCase__ : Tuple = []
for batch in data_loader:
UpperCamelCase__ : int = batch['''input_ids'''].shape
UpperCamelCase__ : Any = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
UpperCamelCase__ : Tuple = np.product(batch['''input_ids'''].shape )
num_src_per_batch.append(lowerCamelCase__ )
if num_src_tokens > (max_tokens * 1.1):
failures.append(lowerCamelCase__ )
assert num_src_per_batch[0] == max(lowerCamelCase__ )
if failures:
raise AssertionError(F"too many tokens in {len(lowerCamelCase__ )} batches" )
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Tuple = self._get_dataset(max_len=512 )
UpperCamelCase__ : Union[str, Any] = 2
UpperCamelCase__ : Optional[int] = ds.make_sortish_sampler(lowerCamelCase__ , shuffle=lowerCamelCase__ )
UpperCamelCase__ : List[Any] = DataLoader(lowerCamelCase__ , batch_size=lowerCamelCase__ , collate_fn=ds.collate_fn , num_workers=2 )
UpperCamelCase__ : str = DataLoader(lowerCamelCase__ , batch_size=lowerCamelCase__ , collate_fn=ds.collate_fn , num_workers=2 , sampler=lowerCamelCase__ )
UpperCamelCase__ : Optional[Any] = tokenizer.pad_token_id
def count_pad_tokens(lowerCamelCase__ : int , lowerCamelCase__ : List[Any]="input_ids" ):
return [batch[k].eq(lowerCamelCase__ ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(lowerCamelCase__ , k='''labels''' ) ) < sum(count_pad_tokens(lowerCamelCase__ , k='''labels''' ) )
assert sum(count_pad_tokens(lowerCamelCase__ ) ) < sum(count_pad_tokens(lowerCamelCase__ ) )
assert len(lowerCamelCase__ ) == len(lowerCamelCase__ )
    def _get_dataset(self, n_obs=1000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)
        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = SeqaSeqDataset(
            tokenizer, data_dir=data_dir, type_path="train", max_source_length=max_len, max_target_length=max_len, n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer
    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = SeqaSeqDataset(
                tokenizer, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()), type_path="train", max_source_length=4, max_target_length=8, src_lang="EN", tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = SeqaSeqDataset(
                tokenizer, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()), type_path="train", max_source_length=4, max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
| 51 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    """
    Creates and saves a basic cluster config to be used on a local machine with
    potentially multiple GPUs. Will also set CPU if it is a CPU-only machine.
    """
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path
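
# Hedged usage sketch (an addition, not part of the accelerate source): writing a
# bf16 single-machine config programmatically. The temp-file location is
# illustrative only.
def _demo_write_basic_config() -> None:
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        path = write_basic_config(mixed_precision="bf16", save_location=f"{tmp}/default_config.json")
        print(f"accelerate config written to {path}")
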
def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser
def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
| 166 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken

from ...test_tokenization_common import TokenizerTesterMixin

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")


@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)
                self.assertNotEqual(vocab_size, 0)
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)
                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))
                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)
                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)
                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )
                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
    @slow
    def test_tokenizer_integration(self):
        sequences = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
        expected_encoding = {
'input_ids': [
[4, 3_2, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 6_4, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_5, 2_2, 4, 2_8, 9, 8, 2_0, 9, 4, 7, 1_2, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 6, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 7, 9, 1_4, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 3_9, 2_5, 5, 1_3, 6, 6_3, 4, 2_4, 1_3, 8, 2_7, 1_0, 1_4, 5, 1_2, 4, 2_1, 5, 9, 5, 1_3, 7, 1_5, 3_9, 2_4, 1_6, 1_3, 2_4, 8, 1_2, 5, 4, 7, 1_3, 1_7, 1_1, 1_0, 6, 5, 1_7, 6, 1_6, 1_3, 5, 1_2, 4, 6_4, 4_0, 4_7, 5_4, 3_2, 2_3, 4, 5_3, 4_9, 3_2, 2_3, 4, 5_4, 8, 4_0, 4_7, 5_4, 3_2, 7, 2_3, 4, 6_9, 5_2, 4_3, 2_3, 4, 5_1, 1_0, 1_2, 6, 1_0, 1_5, 4_0, 5, 1_3, 6, 2_3, 4, 6_9, 5_2, 4_8, 5, 6, 2_6, 2_6, 2_6, 6_3, 4, 1_9, 8, 1_3, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 6_1, 9, 1_4, 5, 1_3, 1_2, 6, 7, 9, 1_4, 1_0, 9, 2_1, 4, 6_4, 4_8, 5_2, 6_1, 6_3, 4, 7, 9, 1_4, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 5_3, 5, 9, 5, 1_3, 7, 6, 1_0, 8, 9, 4, 6_4, 4_8, 5_2, 5_3, 6_3, 4, 2_0, 1_0, 6, 1_1, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 1_0, 1_3, 6, 2_2, 3_9, 6, 2_0, 8, 4, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 4, 1_8, 8, 1_4, 5, 1_5, 1_2, 4, 1_0, 9, 4, 8, 9, 5, 4, 1_1, 1_6, 9, 1_4, 1_3, 5, 1_4, 4, 2_4, 1_5, 1_6, 1_2, 4, 1_5, 7, 9, 2_1, 1_6, 7, 2_1, 5, 1_2, 4, 7, 9, 1_4, 4, 1_4, 5, 5, 2_4, 4, 1_0, 9, 6, 5, 1_3, 8, 2_4, 5, 1_3, 7, 2_5, 1_0, 1_5, 1_0, 6, 2_2, 4, 2_5, 5, 6, 2_0, 5, 5, 9, 4, 5_8, 7, 3_7, 2_3, 4, 4_9, 2_2, 3_2, 8, 1_3, 1_7, 1_1, 4, 7, 9, 1_4, 4, 3_2, 5, 9, 1_2, 8, 1_3, 5_5, 1_5, 8, 2_0, 2_6, 2],
[4, 4_0, 4_7, 5_4, 3_2, 4, 1_0, 1_2, 4, 1_4, 5, 1_2, 1_0, 2_1, 9, 5, 1_4, 4, 6, 8, 4, 2_4, 1_3, 5, 3_9, 6, 1_3, 7, 1_0, 9, 4, 1_4, 5, 5, 2_4, 4, 2_5, 1_0, 1_4, 1_0, 1_3, 5, 1_7, 6, 1_0, 8, 9, 7, 1_5, 4, 1_3, 5, 2_4, 1_3, 5, 1_2, 5, 9, 6, 7, 6, 1_0, 8, 9, 1_2, 4, 1_9, 1_3, 8, 1_8, 4, 1_6, 9, 1_5, 7, 2_5, 5, 1_5, 5, 1_4, 4, 6, 5, 3_7, 6, 4, 2_5, 2_2, 4, 4_6, 8, 1_0, 9, 6, 1_5, 2_2, 4, 1_7, 8, 9, 1_4, 1_0, 6, 1_0, 8, 9, 1_0, 9, 2_1, 4, 8, 9, 4, 2_5, 8, 6, 1_1, 4, 1_5, 5, 1_9, 6, 4, 7, 9, 1_4, 4, 1_3, 1_0, 2_1, 1_1, 6, 4, 1_7, 8, 9, 6, 5, 3_7, 6, 4, 1_0, 9, 4, 7, 1_5, 1_5, 4, 1_5, 7, 2_2, 5, 1_3, 1_2, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 3_2, 1_1, 5, 4, 4_5, 1_6, 1_0, 1_7, 2_8, 4, 2_5, 1_3, 8, 2_0, 9, 4, 1_9, 8, 3_7, 4, 4_6, 1_6, 1_8, 2_4, 1_2, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 5, 4, 1_5, 7, 5_7, 2_2, 4, 1_4, 8, 2_1, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="microsoft/speecht5_asr", revision="c5ef64c71905caeccde0e4462ef3f9077224c524", sequences=sequences,)
| 166 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
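
# Note (added for clarity): with this lazy-import pattern the module object is swapped
# for a _LazyModule proxy at import time, so the sentencepiece-backed GPTSw3Tokenizer is
# only materialized on first attribute access and importing transformers stays cheap
# when the optional dependency is missing.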
| 166 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}


class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
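
# Hedged sketch (added, not from the original file): with the defaults above,
# adaptive-softmax projections are tied for every cluster except the first.
def _demo_transfo_xl_config() -> None:
    config = TransfoXLConfig()
    assert config.cutoffs == [20000, 40000, 200000]
    assert config.tie_projs == [False, True, True, True]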
| 166 | 1 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}


class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Any=3_2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=7_6_8 , SCREAMING_SNAKE_CASE_ : int=1_2 , SCREAMING_SNAKE_CASE_ : Dict=1_2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=3_0_7_2 , SCREAMING_SNAKE_CASE_ : Dict="gelu" , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : Dict=0.1 , SCREAMING_SNAKE_CASE_ : Tuple=0.1 , SCREAMING_SNAKE_CASE_ : List[str]=0.0 , SCREAMING_SNAKE_CASE_ : int=0.1 , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : List[Any]=0.02 , SCREAMING_SNAKE_CASE_ : Any=1e-5 , SCREAMING_SNAKE_CASE_ : List[Any]="gelu" , SCREAMING_SNAKE_CASE_ : List[Any]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , SCREAMING_SNAKE_CASE_ : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , SCREAMING_SNAKE_CASE_ : str=(1_0, 3, 3, 3, 3, 2, 2) , SCREAMING_SNAKE_CASE_ : Any=False , SCREAMING_SNAKE_CASE_ : Tuple=1_6 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1_9 , SCREAMING_SNAKE_CASE_ : Tuple=5 , SCREAMING_SNAKE_CASE_ : Dict=0.05 , SCREAMING_SNAKE_CASE_ : List[Any]=1_0 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE_ : List[str]=0.0 , SCREAMING_SNAKE_CASE_ : str=1_0 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE_ : int="sum" , SCREAMING_SNAKE_CASE_ : List[str]=False , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False , SCREAMING_SNAKE_CASE_ : Optional[int]=2_5_6 , SCREAMING_SNAKE_CASE_ : str=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , SCREAMING_SNAKE_CASE_ : Any=(5, 3, 3, 1, 1) , SCREAMING_SNAKE_CASE_ : Any=(1, 2, 3, 1, 1) , SCREAMING_SNAKE_CASE_ : Optional[Any]=5_1_2 , SCREAMING_SNAKE_CASE_ : Tuple=0 , SCREAMING_SNAKE_CASE_ : str=1 , SCREAMING_SNAKE_CASE_ : Optional[int]=2 , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : Optional[Any]=3 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE_ : Any=3 , SCREAMING_SNAKE_CASE_ : Any=None , **SCREAMING_SNAKE_CASE_ : Any , ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE_ , pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ )
lowercase_ = hidden_size
lowercase_ = feat_extract_activation
lowercase_ = list(SCREAMING_SNAKE_CASE_ )
lowercase_ = list(SCREAMING_SNAKE_CASE_ )
lowercase_ = list(SCREAMING_SNAKE_CASE_ )
lowercase_ = conv_bias
lowercase_ = num_conv_pos_embeddings
lowercase_ = num_conv_pos_embedding_groups
lowercase_ = conv_pos_kernel_size
lowercase_ = len(self.conv_dim )
lowercase_ = num_hidden_layers
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = num_attention_heads
lowercase_ = hidden_dropout
lowercase_ = attention_dropout
lowercase_ = activation_dropout
lowercase_ = feat_proj_dropout
lowercase_ = final_dropout
lowercase_ = layerdrop
lowercase_ = layer_norm_eps
lowercase_ = initializer_range
lowercase_ = vocab_size
lowercase_ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase_ = mask_time_prob
lowercase_ = mask_time_length
lowercase_ = mask_time_min_masks
lowercase_ = mask_feature_prob
lowercase_ = mask_feature_length
lowercase_ = mask_feature_min_masks
# ctc loss
lowercase_ = ctc_loss_reduction
lowercase_ = ctc_zero_infinity
# adapter
lowercase_ = add_adapter
lowercase_ = adapter_kernel_size
lowercase_ = adapter_stride
lowercase_ = num_adapter_layers
lowercase_ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowercase_ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowercase_ = list(SCREAMING_SNAKE_CASE_ )
lowercase_ = list(SCREAMING_SNAKE_CASE_ )
lowercase_ = list(SCREAMING_SNAKE_CASE_ )
lowercase_ = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
| 30 |
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")
    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)
    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            # non-alphabetic characters pass through unchanged
            translated.append(symbol)
    return "".join(translated)
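
# Illustrative check (an addition): encrypting and then decrypting with the same key
# recovers the plaintext, preserving case and non-alphabetic characters.
def _demo_vigenere_round_trip() -> None:
    ciphertext = encrypt_message("LEMON", "Attack at dawn!")
    assert decrypt_message("LEMON", ciphertext) == "Attack at dawn!"
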
if __name__ == "__main__":
main() | 233 | 0 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of the CLIP embedder used in stable unCLIP
    and normalizes / denormalizes image embeddings with them.
    """

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device=None, torch_dtype=None):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
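
# Hedged usage sketch (added): scale() standardizes embeddings with the learned
# mean/std and unscale() inverts it exactly, so a round trip recovers the input.
def _demo_image_normalizer() -> None:
    normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
    embeds = torch.randn(2, 768)
    assert torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds, atol=1e-6)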
| 112 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)
                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)
                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)
                self.assertEqual(text_2.replace(" ", ""), output_text)
    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
| 112 | 1 |
'''simple docstring'''
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Three fixed priorities (0 is highest); each sub-queue holds at most 100 items."""

    def __init__(self) -> None:
        self.queues: list[list[int]] = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        # drain the highest-priority non-empty queue first
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """The smallest element has the highest priority."""

    def __init__(self) -> None:
        self.queue: list[int] = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)
def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 100 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 128 )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
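
# Illustrative check (an addition): FixedPriorityQueue drains priority 0 before
# priorities 1 and 2, while ElementPriorityQueue always returns the current minimum.
def _demo_priority_semantics() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(2, 5)
    fpq.enqueue(0, 9)
    assert fpq.dequeue() == 9  # priority 0 wins even though 5 was enqueued first
    epq = ElementPriorityQueue()
    for value in (7, 3, 11):
        epq.enqueue(value)
    assert epq.dequeue() == 3
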
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 37 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 297 | 0 |
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Great-circle distance in metres between two (latitude, longitude) points,
    using the haversine formula on a flattened spheroid
    (https://en.wikipedia.org/wiki/Haversine_formula).
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
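
# Sanity sketch (added): the distance is exactly zero for identical points and
# symmetric in its arguments; the coordinates below are illustrative only.
def _demo_haversine_properties() -> None:
    san_francisco = (37.774856, -122.424227)
    yosemite = (37.864742, -119.537521)
    assert haversine_distance(*san_francisco, *san_francisco) == 0.0
    d_ab = haversine_distance(*san_francisco, *yosemite)
    d_ba = haversine_distance(*yosemite, *san_francisco)
    assert abs(d_ab - d_ba) < 1e-6
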
if __name__ == "__main__":
import doctest
doctest.testmod()
| 358 |
def decimal_isolate(number: float, digit_amount: int) -> float:
    """
    Isolates the decimal part of a number: rounds it to `digit_amount` places when
    digit_amount > 0, otherwise returns the entire decimal part.
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 45 | 0 |
'''simple docstring'''
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search that appends each vertex after all of its descendants."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search on the reversed graph collecting one strongly connected component."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: one DFS pass for finishing order, a second pass on the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
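
# Quick demonstration (added): in the second sample graph the cycles 0->1->2->0 and
# 3->4->5->3 form exactly two strongly connected components.
def _demo_strongly_connected_components() -> None:
    components = strongly_connected_components(test_graph_2)
    assert sorted(sorted(c) for c in components) == [[0, 1, 2], [3, 4, 5]]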
| 47 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223])
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
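
# Note (added): every fast test above follows the same recipe — load the ONNX pipeline
# on the CPU execution provider, optionally swap the scheduler with
# SchedulerClass.from_config(pipe.scheduler.config), run three inference steps on dummy
# inputs, and compare a 3x3 corner slice of the 512x512 output against reference values.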
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 203 | 0 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0  # rescale from [0, 1] to [-1, 1]
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation, falling back to lerp for near-parallel inputs."""
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1
    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)
    return v2
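
# Hedged sketch (added): slerp returns the endpoints exactly at t=0 and t=1 for
# non-parallel torch inputs; the vectors below are illustrative.
def _demo_slerp_endpoints() -> None:
    v0 = torch.tensor([1.0, 0.0])
    v1 = torch.tensor([0.0, 1.0])
    assert torch.allclose(slerp(0.0, v0, v1), v0, atol=1e-6)
    assert torch.allclose(slerp(1.0, v0, v1), v1, atol=1e-6)
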
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
def snake_case__ ( self : Any , _lowerCAmelCase : str , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any]=None ):
if not isinstance(_lowerCAmelCase , torch.Tensor ):
raise ValueError(f'''`image` has to be of type `torch.Tensor` but is {type(_lowerCAmelCase )}''' )
__snake_case : int = image.to(device=_lowerCAmelCase , dtype=_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__snake_case : Optional[Any] = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_lowerCAmelCase )
]
__snake_case : Dict = torch.cat(_lowerCAmelCase , dim=0 )
else:
__snake_case : int = self.vae.encode(_lowerCAmelCase ).latent_dist.sample(_lowerCAmelCase )
        # Hard-code 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__snake_case : Union[str, Any] = 0.18215 * init_latents
__snake_case : Union[str, Any] = init_latents.repeat_interleave(_lowerCAmelCase , dim=0 )
__snake_case : Tuple = randn_tensor(init_latents.shape , generator=_lowerCAmelCase , device=_lowerCAmelCase , dtype=_lowerCAmelCase )
# get latents
__snake_case : Dict = self.scheduler.add_noise(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__snake_case : int = init_latents
return latents
def snake_case__ ( self : Optional[int] , _lowerCAmelCase : Tuple ):
__snake_case : Optional[Any] = self.coca_transform(_lowerCAmelCase ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
__snake_case : List[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
__snake_case : Optional[int] = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("""<end_of_text>""" )[0].replace("""<start_of_text>""" , """""" ).rstrip(""" .,""" )
def snake_case__ ( self : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] ):
__snake_case : List[Any] = self.feature_extractor.preprocess(_lowerCAmelCase )
__snake_case : str = torch.from_numpy(clip_image_input["""pixel_values"""][0] ).unsqueeze(0 ).to(self.device ).half()
__snake_case : Any = self.clip_model.get_image_features(_lowerCAmelCase )
__snake_case : Optional[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_lowerCAmelCase )
__snake_case : Optional[int] = image_embeddings_clip.repeat_interleave(_lowerCAmelCase , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def snake_case__ ( self : Dict , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , ):
__snake_case : Tuple = latents.detach().requires_grad_()
__snake_case : Optional[int] = self.scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
# predict the noise residual
__snake_case : Optional[Any] = self.unet(_lowerCAmelCase , _lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
__snake_case : Optional[Any] = self.scheduler.alphas_cumprod[timestep]
__snake_case : Optional[Any] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__snake_case : Dict = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
__snake_case : Any = torch.sqrt(_lowerCAmelCase )
__snake_case : Union[str, Any] = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , _lowerCAmelCase ):
__snake_case : List[str] = self.scheduler.sigmas[index]
__snake_case : List[str] = latents - sigma * noise_pred
else:
raise ValueError(f'''scheduler type {type(self.scheduler )} not supported''' )
        # Hard-code 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__snake_case : Any = 1 / 0.18215 * sample
__snake_case : str = self.vae.decode(_lowerCAmelCase ).sample
__snake_case : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
__snake_case : List[str] = transforms.Resize(self.feature_extractor_size )(_lowerCAmelCase )
__snake_case : Any = self.normalize(_lowerCAmelCase ).to(latents.dtype )
__snake_case : str = self.clip_model.get_image_features(_lowerCAmelCase )
__snake_case : Optional[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_lowerCAmelCase )
__snake_case : Optional[int] = spherical_dist_loss(_lowerCAmelCase , _lowerCAmelCase ).mean() * clip_guidance_scale
__snake_case : List[Any] = -torch.autograd.grad(_lowerCAmelCase , _lowerCAmelCase )[0]
if isinstance(self.scheduler , _lowerCAmelCase ):
__snake_case : Optional[Any] = latents.detach() + grads * (sigma**2)
__snake_case : Any = noise_pred_original
else:
__snake_case : int = noise_pred_original - torch.sqrt(_lowerCAmelCase ) * grads
return noise_pred, latents
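    # Worked form of the two guidance updates above (a restatement of the code, not new
    # behavior):
    #   epsilon-prediction schedulers:  eps' = eps - sqrt(1 - alpha_bar_t) * grad
    #   sigma-based (LMS) scheduler:    x'   = x + sigma_t ** 2 * grad
    # with grad = -d(clip_loss)/d(latents), so both variants nudge the denoising
    # trajectory toward latents whose decoded image is closer, in CLIP spherical
    # distance, to the target embedding.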
@torch.no_grad()
def __call__( self : int , _lowerCAmelCase : Union[torch.FloatTensor, PIL.Image.Image] , _lowerCAmelCase : Union[torch.FloatTensor, PIL.Image.Image] , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional[int] = 5_12 , _lowerCAmelCase : Optional[int] = 5_12 , _lowerCAmelCase : float = 0.6 , _lowerCAmelCase : Optional[int] = 50 , _lowerCAmelCase : Optional[float] = 7.5 , _lowerCAmelCase : Optional[int] = 1 , _lowerCAmelCase : float = 0.0 , _lowerCAmelCase : Optional[float] = 1_00 , _lowerCAmelCase : Optional[torch.Generator] = None , _lowerCAmelCase : Optional[str] = "pil" , _lowerCAmelCase : bool = True , _lowerCAmelCase : float = 0.8 , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : float = 0.1 , ):
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(_lowerCAmelCase ) != batch_size:
raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(_lowerCAmelCase )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(_lowerCAmelCase , torch.Generator ) and batch_size > 1:
__snake_case : List[str] = [generator] + [None] * (batch_size - 1)
__snake_case : str = [
("""model""", self.coca_model is None),
("""tokenizer""", self.coca_tokenizer is None),
("""transform""", self.coca_transform is None),
]
__snake_case : Tuple = [x[0] for x in coca_is_none if x[1]]
__snake_case : Optional[int] = """, """.join(_lowerCAmelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(_lowerCAmelCase ):
raise ValueError(
f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
                    f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
__snake_case : Optional[int] = self.get_image_description(_lowerCAmelCase )
if style_prompt is None:
if len(_lowerCAmelCase ):
raise ValueError(
f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
__snake_case : Any = self.get_image_description(_lowerCAmelCase )
# get prompt text embeddings for content and style
__snake_case : List[Any] = self.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""pt""" , )
__snake_case : List[Any] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
__snake_case : Any = self.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""pt""" , )
__snake_case : Tuple = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
__snake_case : str = slerp(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# duplicate text embeddings for each generation per prompt
__snake_case : str = text_embeddings.repeat_interleave(_lowerCAmelCase , dim=0 )
# set timesteps
__snake_case : str = """offset""" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
__snake_case : Tuple = {}
if accepts_offset:
__snake_case : Optional[Any] = 1
self.scheduler.set_timesteps(_lowerCAmelCase , **_lowerCAmelCase )
        # Some schedulers like PNDM have timesteps as arrays.
        # It is more efficient to move all timesteps to the correct device beforehand.
        # Note that Tensor.to() is not in-place, so the result below must be used for the move to take effect.
        self.scheduler.timesteps.to(self.device )
__snake_case , __snake_case : Any = self.get_timesteps(_lowerCAmelCase , _lowerCAmelCase , self.device )
__snake_case : Union[str, Any] = timesteps[:1].repeat(_lowerCAmelCase )
# Preprocess image
__snake_case : List[str] = preprocess(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__snake_case : str = self.prepare_latents(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , text_embeddings.dtype , self.device , _lowerCAmelCase )
__snake_case : Optional[Any] = preprocess(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__snake_case : Union[str, Any] = self.prepare_latents(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , text_embeddings.dtype , self.device , _lowerCAmelCase )
__snake_case : int = slerp(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if clip_guidance_scale > 0:
__snake_case : Tuple = self.get_clip_image_embeddings(_lowerCAmelCase , _lowerCAmelCase )
__snake_case : Tuple = self.get_clip_image_embeddings(_lowerCAmelCase , _lowerCAmelCase )
__snake_case : List[Any] = slerp(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case : Optional[Any] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case : Optional[Any] = content_text_input.input_ids.shape[-1]
__snake_case : List[Any] = self.tokenizer([""""""] , padding="""max_length""" , max_length=_lowerCAmelCase , return_tensors="""pt""" )
__snake_case : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
__snake_case : str = uncond_embeddings.repeat_interleave(_lowerCAmelCase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case : Union[str, Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case : Any = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
__snake_case : Union[str, Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
__snake_case : Any = torch.randn(_lowerCAmelCase , generator=_lowerCAmelCase , device="""cpu""" , dtype=_lowerCAmelCase ).to(
self.device )
else:
__snake_case : int = torch.randn(_lowerCAmelCase , generator=_lowerCAmelCase , device=self.device , dtype=_lowerCAmelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__snake_case : Dict = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__snake_case : int = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case : Dict = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case : List[Any] = {}
if accepts_eta:
__snake_case : int = eta
# check if the scheduler accepts generator
__snake_case : Optional[Any] = """generator""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
__snake_case : Tuple = generator
with self.progress_bar(total=_lowerCAmelCase ):
for i, t in enumerate(_lowerCAmelCase ):
# expand the latents if we are doing classifier free guidance
__snake_case : Any = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case : int = self.scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
# predict the noise residual
__snake_case : Tuple = self.unet(_lowerCAmelCase , _lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
__snake_case , __snake_case : Any = noise_pred.chunk(2 )
__snake_case : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
__snake_case : Tuple = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
__snake_case , __snake_case : Dict = self.cond_fn(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , )
# compute the previous noisy sample x_t -> x_t-1
__snake_case : Optional[int] = self.scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
        # Hard-code 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__snake_case : Dict = 1 / 0.18215 * latents
__snake_case : Optional[Any] = self.vae.decode(_lowerCAmelCase ).sample
__snake_case : int = (image / 2 + 0.5).clamp(0 , 1 )
__snake_case : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__snake_case : Optional[Any] = self.numpy_to_pil(_lowerCAmelCase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=_lowerCAmelCase , nsfw_content_detected=_lowerCAmelCase )
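# Hypothetical usage sketch for the pipeline class above (the checkpoint id, loading
# route, and variable names are illustrative assumptions, not taken from this file):
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "some/stable-diffusion-checkpoint",
#       custom_pipeline="clip_guided_images_mixing_stable_diffusion",
#   ).to("cuda")
#   out = pipe(content_image, style_image, num_inference_steps=50, clip_guidance_scale=100)
#   out.images[0].save("mixed.png")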
| 20 | import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
lowercase_ = {
"bart": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"bert": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-base-cased-finetuned-mrpc": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"dpr": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"gpt2": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlnet": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm-roberta": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"transfo-xl": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"openai-gpt": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"roberta": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"layoutlm": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"roberta-large-mnli": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"camembert": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"flaubert": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert-base-distilled-squad": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert-visual-feature-encoder": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"ctrl": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"albert": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"t5": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"electra": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"wav2vec2": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : List[Any]=True ):
'''simple docstring'''
if model_type not in MODEL_CLASSES:
raise ValueError(F'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' )
__snake_case , __snake_case , __snake_case , __snake_case : Any = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
__snake_case : int = cached_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
__snake_case : Dict = config_class.from_json_file(__SCREAMING_SNAKE_CASE )
__snake_case : Tuple = True
__snake_case : Union[str, Any] = True
print(F'''Building TensorFlow model from configuration: {config}''' )
__snake_case : List[Any] = model_class(__SCREAMING_SNAKE_CASE )
    # Resolve the PyTorch checkpoint (download it from AWS if a shortcut name was given)
if pytorch_checkpoint_path in aws_config_map.keys():
__snake_case : Optional[Any] = cached_file(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
__snake_case : List[Any] = load_pytorch_checkpoint_in_tfa_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if compare_with_pt_model:
__snake_case : Tuple = tf_model(tf_model.dummy_inputs , training=__SCREAMING_SNAKE_CASE ) # build the network
__snake_case : List[str] = torch.load(__SCREAMING_SNAKE_CASE , map_location="""cpu""" )
__snake_case : Any = pt_model_class.from_pretrained(
pretrained_model_name_or_path=__SCREAMING_SNAKE_CASE , config=__SCREAMING_SNAKE_CASE , state_dict=__SCREAMING_SNAKE_CASE )
with torch.no_grad():
__snake_case : Union[str, Any] = pt_model(**pt_model.dummy_inputs )
__snake_case : Any = pto[0].numpy()
__snake_case : Optional[int] = tfo[0].numpy()
__snake_case : Optional[int] = np.amax(np.abs(np_pt - np_tf ) )
print(F'''Max absolute difference between models outputs {diff}''' )
assert diff <= 2E-2, F'''Error, model absolute difference is >2e-2: {diff}'''
    # Save the TensorFlow model
print(F'''Save TensorFlow model to {tf_dump_path}''' )
tf_model.save_weights(__SCREAMING_SNAKE_CASE , save_format="""h5""" )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : Any=False , ):
'''simple docstring'''
if args_model_type is None:
__snake_case : Tuple = list(MODEL_CLASSES.keys() )
else:
__snake_case : Union[str, Any] = [args_model_type]
for j, model_type in enumerate(__SCREAMING_SNAKE_CASE , start=1 ):
print("""=""" * 1_0_0 )
print(F''' Converting model type {j}/{len(__SCREAMING_SNAKE_CASE )}: {model_type}''' )
print("""=""" * 1_0_0 )
if model_type not in MODEL_CLASSES:
raise ValueError(F'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' )
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[int] = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
__snake_case : int = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
__snake_case : Union[str, Any] = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , start=1 ):
print("""-""" * 1_0_0 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(F''' Skipping finetuned checkpoint {model_shortcut_name}''' )
continue
__snake_case : List[Any] = model_shortcut_name
elif only_convert_finetuned_models:
            print(F''' Skipping non-finetuned checkpoint {model_shortcut_name}''' )
continue
print(
F''' Converting checkpoint {i}/{len(__SCREAMING_SNAKE_CASE )}: {model_shortcut_name} - model_type {model_type}''' )
print("""-""" * 1_0_0 )
if config_shortcut_name in aws_config_map:
__snake_case : int = cached_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
else:
__snake_case : Dict = config_shortcut_name
if model_shortcut_name in aws_model_maps:
__snake_case : Union[str, Any] = cached_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
else:
__snake_case : List[Any] = model_shortcut_name
if os.path.isfile(__SCREAMING_SNAKE_CASE ):
__snake_case : List[str] = """converted_model"""
convert_pt_checkpoint_to_tf(
model_type=__SCREAMING_SNAKE_CASE , pytorch_checkpoint_path=__SCREAMING_SNAKE_CASE , config_file=__SCREAMING_SNAKE_CASE , tf_dump_path=os.path.join(__SCREAMING_SNAKE_CASE , model_shortcut_name + """-tf_model.h5""" ) , compare_with_pt_model=__SCREAMING_SNAKE_CASE , )
if remove_cached_files:
os.remove(__SCREAMING_SNAKE_CASE )
os.remove(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
)
parser.add_argument(
"--model_type",
default=None,
type=str,
help=(
F'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
"convert all the models from AWS."
),
)
parser.add_argument(
"--pytorch_checkpoint_path",
default=None,
type=str,
help=(
"Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
"If not given, will download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--config_file",
default=None,
type=str,
help=(
"The config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture. If not given and "
"--pytorch_checkpoint_path is not given or is a shortcut name "
"use the configuration associated to the shortcut name on the AWS"
),
)
parser.add_argument(
"--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
)
parser.add_argument(
"--use_cached_models",
action="store_true",
help="Use cached models if possible instead of updating to latest checkpoint versions.",
)
parser.add_argument(
"--remove_cached_files",
action="store_true",
help="Remove pytorch models after conversion (save memory when converting in batches).",
)
parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
lowercase_ = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 20 | 1 |
def prefix_function( input_string: str ):
    '''simple docstring'''
    prefix_result = [0] * len(input_string )
    for i in range(1 , len(input_string ) ):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix( input_string: str ):
    '''simple docstring'''
    return max(prefix_function(input_string ) )
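# Worked example for prefix_function above (the classic KMP failure table): for
# "aabcdaabc" the table is [0, 1, 0, 0, 0, 1, 2, 3, 4]; the final value 4 says the
# four-character prefix "aabc" also ends the string, which is what longest_prefix reports.
assert prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
assert longest_prefix("aabcdaabc") == 4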
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30 | """simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , albert_config_file , pytorch_dump_path ):
    """simple docstring"""
    config = AlbertConfig.from_json_file(albert_config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = AlbertForPreTraining(config )
    # Load weights from the TensorFlow checkpoint
    load_tf_weights_in_albert(model , config , tf_checkpoint_path )
    # Save the PyTorch model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--albert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained ALBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCamelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
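# Hypothetical command line for the converter above (the script filename and the paths
# are placeholders, not taken from this file):
#
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./albert_base/model.ckpt-best \
#     --albert_config_file ./albert_base/albert_config.json \
#     --pytorch_dump_path ./albert_base/pytorch_model.bin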
| 221 | 0 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase : List[str] = logging.get_logger(__name__)
lowercase : Tuple = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
lowercase : Any = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
lowercase : str = {'''facebook/blenderbot-3B''': 1_28}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs( word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
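# A doctest-style illustration of get_pairs above; these adjacent symbol pairs are what
# the `bpe` method below ranks against self.bpe_ranks and merges greedily:
#
#   >>> sorted(get_pairs(("l", "o", "w", "er")))
#   [('l', 'o'), ('o', 'w'), ('w', 'er')]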
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : Any = VOCAB_FILES_NAMES
A : Tuple = PRETRAINED_VOCAB_FILES_MAP
A : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Any = ['input_ids', 'attention_mask']
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="replace" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
snake_case_ : Optional[Any] = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else bos_token
snake_case_ : List[str] = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else eos_token
snake_case_ : Optional[Any] = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else sep_token
snake_case_ : List[Any] = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else cls_token
snake_case_ : Dict = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else unk_token
snake_case_ : int = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ : Optional[Any] = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
errors=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
with open(_SCREAMING_SNAKE_CASE , encoding="utf-8" ) as vocab_handle:
snake_case_ : str = json.load(_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = {v: k for k, v in self.encoder.items()}
snake_case_ : Union[str, Any] = errors # how to handle errors in decoding
snake_case_ : Union[str, Any] = bytes_to_unicode()
snake_case_ : Tuple = {v: k for k, v in self.byte_encoder.items()}
with open(_SCREAMING_SNAKE_CASE , encoding="utf-8" ) as merges_handle:
snake_case_ : Any = merges_handle.read().split("\n" )[1:-1]
snake_case_ : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
snake_case_ : List[Any] = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
snake_case_ : int = {}
snake_case_ : str = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
snake_case_ : Any = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def _lowerCAmelCase ( self ) -> int:
return len(self.encoder )
def _lowerCAmelCase ( self ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Tuple:
if token in self.cache:
return self.cache[token]
snake_case_ : Optional[Any] = tuple(_SCREAMING_SNAKE_CASE )
snake_case_ : List[str] = get_pairs(_SCREAMING_SNAKE_CASE )
if not pairs:
return token
while True:
snake_case_ : List[str] = min(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : self.bpe_ranks.get(_SCREAMING_SNAKE_CASE , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
snake_case_ : Optional[int] = bigram
snake_case_ : Union[str, Any] = []
snake_case_ : Any = 0
while i < len(_SCREAMING_SNAKE_CASE ):
try:
snake_case_ : List[str] = word.index(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
snake_case_ : List[Any] = j
if word[i] == first and i < len(_SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
snake_case_ : int = tuple(_SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = new_word
if len(_SCREAMING_SNAKE_CASE ) == 1:
break
else:
snake_case_ : int = get_pairs(_SCREAMING_SNAKE_CASE )
snake_case_ : List[str] = " ".join(_SCREAMING_SNAKE_CASE )
snake_case_ : str = word
return word
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
snake_case_ : Dict = []
for token in re.findall(self.pat , _SCREAMING_SNAKE_CASE ):
snake_case_ : List[Any] = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_SCREAMING_SNAKE_CASE ).split(" " ) )
return bpe_tokens
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]:
return self.encoder.get(_SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
return self.decoder.get(_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Any:
snake_case_ : Any = "".join(_SCREAMING_SNAKE_CASE )
snake_case_ : List[str] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case_ : int = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
snake_case_ : List[Any] = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(_SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE ) + "\n" )
snake_case_ : str = 0
with open(_SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _SCREAMING_SNAKE_CASE : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
snake_case_ : str = token_index
writer.write(" ".join(_SCREAMING_SNAKE_CASE ) + "\n" )
index += 1
return vocab_file, merge_file
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
snake_case_ : Optional[Any] = [self.sep_token_id]
snake_case_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE ) -> Tuple:
snake_case_ : Union[str, Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_SCREAMING_SNAKE_CASE ) > 0 and not text[0].isspace()):
snake_case_ : List[Any] = " " + text
return (text, kwargs)
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[str]:
return token_ids_a + [self.eos_token_id]
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> List[int]:
snake_case_ : Union[str, Any] = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to prefix a space, as is done inside Blenderbot itself
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(_SCREAMING_SNAKE_CASE )
snake_case_ : List[Any] = " ".join(_SCREAMING_SNAKE_CASE )
snake_case_ : Tuple = self.encode(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > self.model_max_length:
snake_case_ : Tuple = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 355 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase : Union[str, Any] = logging.get_logger(__name__)
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : List[Any] = ['pixel_values']
def __init__( self , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = PIL.Image.BICUBIC , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 / 255 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> None:
super().__init__(**_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = size if size is not None else {"height": 256, "width": 256}
snake_case_ : int = get_size_dict(_SCREAMING_SNAKE_CASE )
snake_case_ : Tuple = crop_size if crop_size is not None else {"height": 224, "width": 224}
snake_case_ : Dict = get_size_dict(_SCREAMING_SNAKE_CASE , param_name="crop_size" )
snake_case_ : str = do_resize
snake_case_ : Tuple = size
snake_case_ : Tuple = resample
snake_case_ : Dict = do_center_crop
snake_case_ : Any = crop_size
snake_case_ : int = do_rescale
snake_case_ : Union[str, Any] = rescale_factor
snake_case_ : Optional[int] = do_normalize
snake_case_ : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case_ : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = PIL.Image.BICUBIC , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> np.ndarray:
snake_case_ : List[Any] = get_size_dict(_SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return resize(
_SCREAMING_SNAKE_CASE , size=(size["height"], size["width"]) , resample=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> np.ndarray:
snake_case_ : str = get_size_dict(_SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return center_crop(_SCREAMING_SNAKE_CASE , size=(size["height"], size["width"]) , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> Optional[int]:
return rescale(_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> np.ndarray:
return normalize(_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **_SCREAMING_SNAKE_CASE , ) -> PIL.Image.Image:
snake_case_ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
snake_case_ : Tuple = resample if resample is not None else self.resample
snake_case_ : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case_ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ : Tuple = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
snake_case_ : Optional[int] = image_std if image_std is not None else self.image_std
snake_case_ : Optional[Any] = size if size is not None else self.size
snake_case_ : int = get_size_dict(_SCREAMING_SNAKE_CASE )
snake_case_ : str = crop_size if crop_size is not None else self.crop_size
snake_case_ : Optional[Any] = get_size_dict(_SCREAMING_SNAKE_CASE , param_name="crop_size" )
snake_case_ : int = make_list_of_images(_SCREAMING_SNAKE_CASE )
if not valid_images(_SCREAMING_SNAKE_CASE ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
snake_case_ : Optional[int] = [to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
snake_case_ : Optional[Any] = [self.resize(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
snake_case_ : List[Any] = [self.center_crop(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
snake_case_ : Optional[int] = [self.rescale(image=_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
snake_case_ : List[str] = [self.normalize(image=_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE ) for image in images]
snake_case_ : int = [to_channel_dimension_format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
snake_case_ : List[str] = {"pixel_values": images}
return BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE )
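# Hypothetical usage of the image processor above (the `ImageProcessor` alias and the
# input file are assumptions; `ImageProcessor` stands in for the class defined here):
#
#   from PIL import Image
#   processor = ImageProcessor()          # resize to 256x256, then center-crop to 224x224
#   batch = processor(images=Image.open("cat.png"), return_tensors="pt")
#   batch["pixel_values"].shape           # -> torch.Size([1, 3, 224, 224])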
| 36 | 0 |
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative( img ):
    # getting number of pixels in the image
    height , width = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(height ):
        for j in range(width ):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
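# An equivalent vectorized form (a sketch; for uint8 images NumPy broadcasting gives the
# same result as the per-pixel loops above, far faster):
import numpy as np

def convert_to_negative_fast(img: np.ndarray) -> np.ndarray:
    # 255 - pixel value inverts every channel at once
    return 255 - img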
if __name__ == "__main__":
# read original image
__lowerCAmelCase : Optional[Any] =imread('image_data/lena.jpg', 1)
# convert to its negative
__lowerCAmelCase : Union[str, Any] =convert_to_negative(img)
# show result image
imshow('negative of original image', img)
waitKey(0)
destroyAllWindows()
| 9 |
"""simple docstring"""
import numpy as np
def lowercase ( vector : np.ndarray , alpha : float ) -> np.ndarray:
    # ELU: pass positive values through unchanged, squash negatives toward -alpha
    return np.where(vector > 0 , vector , (alpha * (np.exp(vector ) - 1)) )
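# Worked values for the activation above (with alpha = 1.0): positive inputs pass
# through unchanged and negative inputs are squashed toward -alpha, e.g.
#   lowercase(np.array([-2.0, 0.0, 3.0]), 1.0) ≈ [exp(-2) - 1, 0.0, 3.0] ≈ [-0.8647, 0.0, 3.0]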
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 | 0 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ):
    """simple docstring"""
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
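# Illustrative call of betas_for_alpha_bar above (shown as a sketch rather than executed
# at import time): a 1000-step cosine schedule whose cumulative products of (1 - beta_t)
# track cos((t + 0.008) / 1.008 * pi / 2) ** 2.
#
#   betas = betas_for_alpha_bar(1000)                     # torch.Size([1000])
#   alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)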
class _snake_case (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__A : List[str] =[e.name for e in KarrasDiffusionSchedulers]
__A : List[Any] =2
@register_to_config
def __init__( self ,_snake_case = 10_00 ,_snake_case = 0.00085 ,_snake_case = 0.012 ,_snake_case = "linear" ,_snake_case = None ,_snake_case = "epsilon" ,_snake_case = False ,_snake_case = False ,_snake_case = 1.0 ,_snake_case = "linspace" ,_snake_case = 0 ,):
if trained_betas is not None:
UpperCAmelCase_ : Optional[Any] = torch.tensor(_snake_case ,dtype=torch.floataa )
elif beta_schedule == "linear":
UpperCAmelCase_ : Optional[Any] = torch.linspace(_snake_case ,_snake_case ,_snake_case ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
UpperCAmelCase_ : str = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,_snake_case ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
UpperCAmelCase_ : Optional[int] = betas_for_alpha_bar(_snake_case ,alpha_transform_type="cosine" )
elif beta_schedule == "exp":
UpperCAmelCase_ : List[str] = betas_for_alpha_bar(_snake_case ,alpha_transform_type="exp" )
else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
UpperCAmelCase_ : Tuple = 1.0 - self.betas
UpperCAmelCase_ : Optional[Any] = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(_snake_case ,_snake_case ,_snake_case )
UpperCAmelCase_ : List[Any] = use_karras_sigmas
    def index_for_timestep( self ,timestep ,schedule_timesteps=None ):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter ) == 0:
            pos = 1 if len(indices ) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
@property
def UpperCamelCase__ ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input( self ,sample ,timestep ,):
        step_index = self.index_for_timestep(timestep )
        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = None ,):
UpperCAmelCase_ : Tuple = num_inference_steps
UpperCAmelCase_ : int = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
UpperCAmelCase_ : Optional[int] = np.linspace(0 ,num_train_timesteps - 1 ,_snake_case ,dtype=_snake_case )[::-1].copy()
elif self.config.timestep_spacing == "leading":
UpperCAmelCase_ : List[str] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCAmelCase_ : Any = (np.arange(0 ,_snake_case ) * step_ratio).round()[::-1].copy().astype(_snake_case )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
UpperCAmelCase_ : Union[str, Any] = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCAmelCase_ : str = (np.arange(_snake_case ,0 ,-step_ratio )).round().copy().astype(_snake_case )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
UpperCAmelCase_ : Optional[Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
UpperCAmelCase_ : Union[str, Any] = np.log(_snake_case )
UpperCAmelCase_ : Dict = np.interp(_snake_case ,np.arange(0 ,len(_snake_case ) ) ,_snake_case )
if self.config.use_karras_sigmas:
UpperCAmelCase_ : List[str] = self._convert_to_karras(in_sigmas=_snake_case ,num_inference_steps=self.num_inference_steps )
UpperCAmelCase_ : int = np.array([self._sigma_to_t(_snake_case ,_snake_case ) for sigma in sigmas] )
UpperCAmelCase_ : Optional[int] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
UpperCAmelCase_ : List[Any] = torch.from_numpy(_snake_case ).to(device=_snake_case )
UpperCAmelCase_ : List[str] = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
UpperCAmelCase_ : Dict = torch.from_numpy(_snake_case )
UpperCAmelCase_ : Optional[Any] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(_snake_case ).startswith("mps" ):
# mps does not support float64
UpperCAmelCase_ : List[Any] = timesteps.to(_snake_case ,dtype=torch.floataa )
else:
UpperCAmelCase_ : Dict = timesteps.to(device=_snake_case )
# empty dt and derivative
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Optional[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
UpperCAmelCase_ : Optional[Any] = defaultdict(_snake_case )
    def _sigma_to_t( self ,sigma ,log_sigmas ):
        # get log sigma
        log_sigma = np.log(sigma )
        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]
        # get sigmas range
        low_idx = np.cumsum((dists >= 0) ,axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
        high_idx = low_idx + 1
        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w ,0 ,1 )
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape )
        return t
    def _convert_to_karras( self ,in_sigmas ,num_inference_steps ):
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()
        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0 ,1 ,num_inference_steps )
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
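    # Worked form of the Karras et al. (2022) schedule computed above: with rho = 7,
    #   sigma_i = (sigma_max**(1/rho) + i/(N-1) * (sigma_min**(1/rho) - sigma_max**(1/rho))) ** rho
    # which decays monotonically from sigma_max at i = 0 to sigma_min at i = N - 1,
    # spending more of the N steps at small sigmas than a linear spacing would.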
@property
    def state_in_first_order( self ):
        return self.dt is None
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case = True ,):
UpperCAmelCase_ : Union[str, Any] = self.index_for_timestep(_snake_case )
# advance index counter by 1
UpperCAmelCase_ : List[Any] = timestep.cpu().item() if torch.is_tensor(_snake_case ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
UpperCAmelCase_ : int = self.sigmas[step_index]
UpperCAmelCase_ : Any = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
UpperCAmelCase_ : Union[str, Any] = self.sigmas[step_index - 1]
UpperCAmelCase_ : Union[str, Any] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
UpperCAmelCase_ : Optional[int] = 0
UpperCAmelCase_ : Optional[Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
UpperCAmelCase_ : List[Any] = sigma_hat if self.state_in_first_order else sigma_next
UpperCAmelCase_ : List[str] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase_ : Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_next
UpperCAmelCase_ : Dict = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
UpperCAmelCase_ : List[str] = model_output
else:
raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`''' )
if self.config.clip_sample:
UpperCAmelCase_ : int = pred_original_sample.clamp(
-self.config.clip_sample_range ,self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
UpperCAmelCase_ : Dict = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
UpperCAmelCase_ : Dict = sigma_next - sigma_hat
# store for 2nd order step
UpperCAmelCase_ : List[Any] = derivative
UpperCAmelCase_ : Optional[Any] = dt
UpperCAmelCase_ : int = sample
else:
# 2. 2nd order / Heun's method
UpperCAmelCase_ : str = (sample - pred_original_sample) / sigma_next
UpperCAmelCase_ : int = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
UpperCAmelCase_ : Optional[int] = self.dt
UpperCAmelCase_ : int = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Dict = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
UpperCAmelCase_ : Union[str, Any] = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_snake_case ):
# mps does not support float64
            UpperCAmelCase_ : List[str] = self.timesteps.to(original_samples.device ,dtype=torch.float32 )
            UpperCAmelCase_ : int = timesteps.to(original_samples.device ,dtype=torch.float32 )
else:
UpperCAmelCase_ : int = self.timesteps.to(original_samples.device )
UpperCAmelCase_ : Any = timesteps.to(original_samples.device )
UpperCAmelCase_ : Optional[Any] = [self.index_for_timestep(_snake_case ,_snake_case ) for t in timesteps]
UpperCAmelCase_ : Tuple = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
UpperCAmelCase_ : int = sigma.unsqueeze(-1 )
UpperCAmelCase_ : Optional[int] = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
| 67 |
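# Hedged sketch (editor-added example, not part of the dataset row above): a
# standalone version of the Karras sigma schedule that the `_convert_to_karras`
# method computes, so the interpolation in 1/rho space can be checked in
# isolation. The function and variable names here are illustrative.
import numpy as np

def karras_sigmas(sigma_min, sigma_max, num_steps, rho=7.0):
    # ramp runs from 0 to 1; interpolate between the endpoints in 1/rho space
    ramp = np.linspace(0, 1, num_steps)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho

print(karras_sigmas(0.1, 10.0, 5))  # monotonically decreasing from 10.0 to 0.1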
'''simple docstring'''
import re
def a__ ( _SCREAMING_SNAKE_CASE : str ) -> str:
"""simple docstring"""
if len(re.findall("[ATCG]" , _SCREAMING_SNAKE_CASE ) ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError("Invalid Strand" )
    return _SCREAMING_SNAKE_CASE.translate(str.maketrans("ATCG" , "TAGC" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 67 | 1 |
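# Hedged usage sketch for the DNA complement function in the row above, inlined
# so it runs standalone (`strand` is an illustrative name):
strand = "ATCGATCG"
print(strand.translate(str.maketrans("ATCG", "TAGC")))  # TAGCTAGC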
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__snake_case = None
__snake_case = logging.get_logger(__name__)
__snake_case = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__snake_case = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''',
},
}
# TODO(PVP) - this should be removed in Transformers v5
__snake_case = {
'''t5-small''': 5_1_2,
'''t5-base''': 5_1_2,
'''t5-large''': 5_1_2,
'''t5-3b''': 5_1_2,
'''t5-11b''': 5_1_2,
}
class __lowerCamelCase (_a ):
_lowercase = VOCAB_FILES_NAMES
_lowercase = PRETRAINED_VOCAB_FILES_MAP
_lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase = ["""input_ids""", """attention_mask"""]
_lowercase = TaTokenizer
_lowercase = []
def __init__( self: List[str],A_: List[str]=None,A_: int=None,A_: int="</s>",A_: Tuple="<unk>",A_: Dict="<pad>",A_: Union[str, Any]=100,A_: Optional[Any]=None,**A_: List[str],):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
__UpperCamelCase = [F'''<extra_id_{i}>''' for i in range(A_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
__UpperCamelCase = len(set(filter(lambda A_ : bool('extra_id_' in str(A_ ) ),A_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens' )
super().__init__(
A_,tokenizer_file=A_,eos_token=A_,unk_token=A_,pad_token=A_,extra_ids=A_,additional_special_tokens=A_,**A_,)
__UpperCamelCase = vocab_file
__UpperCamelCase = False if not self.vocab_file else True
__UpperCamelCase = extra_ids
@staticmethod
def snake_case_ ( A_: str,A_: Union[str, Any],A_: Dict ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
__UpperCamelCase = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.',A_,)
return max_model_length
def snake_case_ ( self: Union[str, Any],A_: str,A_: Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(A_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCamelCase = os.path.join(
A_,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ):
copyfile(self.vocab_file,A_ )
logger.info(F'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
def snake_case_ ( self: Dict,A_: List[int],A_: Optional[List[int]] = None ):
'''simple docstring'''
__UpperCamelCase = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
__UpperCamelCase = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def snake_case_ ( self: Any,A_: List[int],A_: Optional[List[int]] = None ):
'''simple docstring'''
__UpperCamelCase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def snake_case_ ( self: str ):
'''simple docstring'''
return list(
set(filter(lambda A_ : bool(re.search(r'<extra_id_\d+>',A_ ) ) is not None,self.additional_special_tokens ) ) )
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
return [self.convert_tokens_to_ids(A_ ) for token in self.get_sentinel_tokens()]
| 310 |
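# Hedged sketch of the sentinel-token filtering used by the tokenizer's
# `get_sentinel_tokens` in the row above: the same regex applied to an
# illustrative token list.
import re

tokens = ["<pad>", "<extra_id_0>", "hello", "<extra_id_1>", "</s>"]
print([t for t in tokens if re.search(r"<extra_id_\d+>", t)])  # ['<extra_id_0>', '<extra_id_1>']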
import math
def _A ( _lowercase ) -> int:
"""simple docstring"""
if not isinstance(_lowercase , _lowercase ):
__UpperCamelCase = f'''Input value of [number={number}] must be an integer'''
raise TypeError(_lowercase )
if number < 1:
__UpperCamelCase = f'''Input value of [number={number}] must be > 0'''
raise ValueError(_lowercase )
elif number == 1:
return 3
elif number == 2:
return 5
else:
__UpperCamelCase = int(math.log(number // 3 , 2 ) ) + 2
__UpperCamelCase = [3, 5]
__UpperCamelCase = 2
__UpperCamelCase = 3
for block in range(1 , _lowercase ):
for _ in range(_lowercase ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(1_1):
__snake_case = 0
try:
__snake_case = proth(number)
except ValueError:
print(f"""ValueError: there is no {number}th Proth number""")
continue
print(f"""The {number}th Proth number: {value}""")
| 310 | 1 |
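# Hedged cross-check for the Proth generator in the row above: a brute-force
# enumeration of Proth numbers (k * 2^m + 1 with k odd and k < 2^m). The
# helper name is illustrative.
def proth_bruteforce(limit):
    out, n = [], 2
    while len(out) < limit:
        n += 1
        m = ((n - 1) & -(n - 1)).bit_length() - 1  # exponent of 2 in n - 1
        k = (n - 1) >> m
        if k % 2 == 1 and k < 2 ** m:
            out.append(n)
    return out

print(proth_bruteforce(8))  # [3, 5, 9, 13, 17, 25, 33, 41]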
'''simple docstring'''
def _lowerCAmelCase ( lowerCamelCase_ : int = 1_0_0_0 ):
__lowercase = 2**power
__lowercase = 0
while n:
__lowercase , __lowercase = r + n % 1_0, n // 1_0
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 370 |
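# Hedged check for the row above (digit sum of 2^power): the digit sum of
# 2^15 = 32768 is 3 + 2 + 7 + 6 + 8 = 26, and for 2^1000 it is 1366.
assert sum(int(d) for d in str(2 ** 15)) == 26
print(sum(int(d) for d in str(2 ** 1000)))  # 1366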
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json''',
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Union[str, Any] = "gpt_neox_japanese"
def __init__(self ,_lowerCamelCase=32000 ,_lowerCamelCase=2560 ,_lowerCamelCase=32 ,_lowerCamelCase=32 ,_lowerCamelCase=4 ,_lowerCamelCase="gelu" ,_lowerCamelCase=1.0_0 ,_lowerCamelCase=10000 ,_lowerCamelCase=2048 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=1E-5 ,_lowerCamelCase=True ,_lowerCamelCase=31996 ,_lowerCamelCase=31999 ,_lowerCamelCase=0.1 ,_lowerCamelCase=0.0 ,**_lowerCamelCase ,) -> Optional[int]:
'''simple docstring'''
super().__init__(bos_token_id=_lowerCamelCase ,eos_token_id=_lowerCamelCase ,**_lowerCamelCase )
__lowercase = vocab_size
__lowercase = max_position_embeddings
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_multiple_size
__lowercase = hidden_act
__lowercase = rotary_pct
__lowercase = rotary_emb_base
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = use_cache
__lowercase = attention_dropout
__lowercase = hidden_dropout
| 217 | 0 |
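# Hedged usage sketch for the configuration class in the row above, assuming it
# is importable under its transformers name `GPTNeoXJapaneseConfig`:
from transformers import GPTNeoXJapaneseConfig

config = GPTNeoXJapaneseConfig()   # defaults from the row, e.g. hidden_size=2560
print(config.model_type)           # gpt_neox_japanese
print(config.num_hidden_layers)    # 32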
from collections import deque
from .hash_table import HashTable
class __snake_case ( a ):
def __init__( self : Union[str, Any] , *_snake_case : List[str] , **_snake_case : Tuple):
"""simple docstring"""
super().__init__(*_snake_case , **_snake_case)
def lowerCamelCase ( self : int , _snake_case : List[Any] , _snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = deque([]) if self.values[key] is None else self.values[key]
self.values[key].appendleft(_snake_case)
UpperCAmelCase_ = self.values[key]
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
return (
sum(self.charge_factor - len(_snake_case) for slot in self.values)
/ self.size_table
* self.charge_factor
)
def lowerCamelCase ( self : List[Any] , _snake_case : Dict , _snake_case : List[str]=None):
"""simple docstring"""
if not (
len(self.values[key]) == self.charge_factor and self.values.count(_snake_case) == 0
):
return key
return super()._collision_resolution(_snake_case , _snake_case)
| 51 |
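# Hedged sketch of the deque-chaining idea used by the row above, written
# against a plain dict so it runs standalone (names are illustrative):
from collections import deque

values = {0: None}
key, data = 0, "payload"
values[key] = deque([]) if values[key] is None else values[key]
values[key].appendleft(data)
print(values[key])  # deque(['payload'])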
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def A (__A : Optional[int] , __A : int , __A : str=None ) -> List[Any]:
"""simple docstring"""
assert torch_layer.weight.shape == weight.shape, F"""{torch_layer} layer.weight does not match"""
UpperCAmelCase_ = nn.Parameter(__A )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F"""{torch_layer} layer.bias does not match"""
UpperCAmelCase_ = nn.Parameter(__A )
def A (__A : Tuple , __A : Dict , __A : str ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ = np.asarray(weights[0] )
UpperCAmelCase_ = np.asarray(weights[1] )
UpperCAmelCase_ = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(__A ).transpose(1 , 2 ).contiguous().view(-1 , __A ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(__A ).transpose(1 , 2 ).contiguous().view(-1 , __A ) , )
set_param(
torch_layer.output.dense , torch.tensor(__A ).view(-1 , __A ).contiguous().transpose(0 , 1 ) , )
def A (__A : Optional[Any] , __A : Any , __A : List[Any] ) -> int:
"""simple docstring"""
UpperCAmelCase_ = np.asarray(weights[0] )
UpperCAmelCase_ = np.asarray(weights[1] )
UpperCAmelCase_ = np.asarray(weights[2] )
UpperCAmelCase_ = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(__A ).transpose(1 , 2 ).contiguous().view(-1 , __A ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(__A ).transpose(1 , 2 ).contiguous().view(-1 , __A ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(__A ).transpose(1 , 2 ).contiguous().view(-1 , __A ) , )
set_param(
torch_layer.output.dense , torch.tensor(__A ).view(-1 , __A ).contiguous().transpose(0 , 1 ) , )
def A (__A : int , __A : Union[str, Any] , __A : List[str] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = weights[0][0][0]
UpperCAmelCase_ = np.asarray(layer_norm_a[0] )
UpperCAmelCase_ = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(__A ) , torch.tensor(__A ) , )
# lsh weights + output
UpperCAmelCase_ = weights[0][1]
if len(__A ) < 4:
set_layer_weights_in_torch_lsh(__A , torch_block.attention , __A )
else:
set_layer_weights_in_torch_local(__A , torch_block.attention , __A )
# intermediate weighs
UpperCAmelCase_ = weights[2][0][1][2]
# Chunked Feed Forward
if len(__A ) == 4:
UpperCAmelCase_ = intermediate_weights[2]
# layernorm 2
UpperCAmelCase_ = np.asarray(intermediate_weights[0][0] )
UpperCAmelCase_ = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(__A ) , torch.tensor(__A ) , )
# intermediate dense
UpperCAmelCase_ = np.asarray(intermediate_weights[1][0] )
UpperCAmelCase_ = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(__A ).transpose(0 , 1 ).contiguous() , torch.tensor(__A ) , )
# intermediate out
UpperCAmelCase_ = np.asarray(intermediate_weights[4][0] )
UpperCAmelCase_ = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(__A ).transpose(0 , 1 ).contiguous() , torch.tensor(__A ) , )
def A (__A : Optional[int] , __A : Tuple , __A : Any ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ = torch_model.reformer
# word embeds
UpperCAmelCase_ = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(__A ) , )
if isinstance(weights[3] , __A ):
UpperCAmelCase_ = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
UpperCAmelCase_ = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F"""{position_embeddings[emb_idx]} emb does not match"""
UpperCAmelCase_ = nn.Parameter(torch.tensor(__A ) )
UpperCAmelCase_ = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
__A ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
UpperCAmelCase_ = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(__A , __A , __A )
# output layer norm
UpperCAmelCase_ = np.asarray(weights[7][0] )
UpperCAmelCase_ = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(__A ) , torch.tensor(__A ) , )
# output embeddings
UpperCAmelCase_ = np.asarray(weights[9][0] )
UpperCAmelCase_ = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(__A ).transpose(0 , 1 ).contiguous() , torch.tensor(__A ) , )
def A (__A : Tuple , __A : int , __A : str ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = ReformerConfig.from_json_file(__A )
print(F"""Building PyTorch model from configuration: {config}""" )
UpperCAmelCase_ = ReformerModelWithLMHead(__A )
with open(__A , '''rb''' ) as f:
UpperCAmelCase_ = pickle.load(__A )['''weights''']
set_model_weights_in_torch(__A , __A , config.hidden_size )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , __A )
if __name__ == "__main__":
snake_case_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
snake_case_ : List[Any] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 51 | 1 |
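# Hedged CLI sketch for the conversion script in the row above; the script
# filename and all paths are placeholders, only the flags come from the
# argparse definitions:
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path /path/to/weights.pkl \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin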
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__a = logging.get_logger(__name__)
__a = {
"facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ ):
A : Dict = 'convnextv2'
def __init__( self , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=224 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(**__A )
lowercase : int = num_channels
lowercase : Union[str, Any] = patch_size
lowercase : Optional[int] = num_stages
lowercase : Optional[Any] = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
lowercase : Dict = [3, 3, 9, 3] if depths is None else depths
lowercase : Tuple = hidden_act
lowercase : Union[str, Any] = initializer_range
lowercase : Dict = layer_norm_eps
lowercase : Union[str, Any] = drop_path_rate
lowercase : Union[str, Any] = image_size
lowercase : int = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
lowercase : List[Any] = get_aligned_output_features_output_indices(
out_features=__A , out_indices=__A , stage_names=self.stage_names )
| 361 |
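# Hedged usage sketch for the backbone config in the row above, assuming the
# transformers name `ConvNextV2Config`: stage names are derived from `depths`.
from transformers import ConvNextV2Config

config = ConvNextV2Config(out_features=["stage2", "stage4"])
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_features)  # ['stage2', 'stage4']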
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
__a = 50_00_00
__a , __a = os.path.split(__file__)
__a = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def __lowercase ( _UpperCamelCase, **_UpperCamelCase ) ->Any:
"""simple docstring"""
lowercase : Optional[Any] = dataset.map(**_UpperCamelCase )
@get_duration
def __lowercase ( _UpperCamelCase, **_UpperCamelCase ) ->Union[str, Any]:
"""simple docstring"""
lowercase : int = dataset.filter(**_UpperCamelCase )
def __lowercase ( ) ->Union[str, Any]:
"""simple docstring"""
lowercase : Dict = {'''num examples''': SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase : Dict = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
lowercase : List[str] = generate_example_dataset(
os.path.join(_UpperCamelCase, '''dataset.arrow''' ), _UpperCamelCase, num_examples=_UpperCamelCase )
lowercase : List[Any] = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''', use_fast=_UpperCamelCase )
def tokenize(_UpperCamelCase ):
return tokenizer(examples['''text'''] )
lowercase : Union[str, Any] = map(_UpperCamelCase )
lowercase : Dict = map(_UpperCamelCase, batched=_UpperCamelCase )
lowercase : Tuple = map(_UpperCamelCase, function=lambda _UpperCamelCase : None, batched=_UpperCamelCase )
with dataset.formatted_as(type='''numpy''' ):
lowercase : Dict = map(_UpperCamelCase, function=lambda _UpperCamelCase : None, batched=_UpperCamelCase )
with dataset.formatted_as(type='''pandas''' ):
lowercase : Any = map(_UpperCamelCase, function=lambda _UpperCamelCase : None, batched=_UpperCamelCase )
with dataset.formatted_as(type='''torch''', columns='''numbers''' ):
lowercase : str = map(_UpperCamelCase, function=lambda _UpperCamelCase : None, batched=_UpperCamelCase )
with dataset.formatted_as(type='''tensorflow''', columns='''numbers''' ):
lowercase : Tuple = map(_UpperCamelCase, function=lambda _UpperCamelCase : None, batched=_UpperCamelCase )
lowercase : List[str] = map(_UpperCamelCase, function=_UpperCamelCase, batched=_UpperCamelCase )
lowercase : Any = filter(_UpperCamelCase )
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(_UpperCamelCase, '''wb''' ) as f:
f.write(json.dumps(_UpperCamelCase ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 173 | 0 |
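# Hedged sketch of what the `get_duration` decorator imported in the row above
# probably looks like (the real implementation lives in the benchmark's utils
# module; this is an assumption, not the source):
import functools
import time

def get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start  # elapsed seconds, collected into the results dict
    return wrapper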
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = TypeVar("""DatasetType""", Dataset, IterableDataset)
def _A ( _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = "first_exhausted" , ):
"""simple docstring"""
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('Unable to interleave an empty list of datasets.' )
for i, dataset in enumerate(_lowerCAmelCase ):
if not isinstance(_lowerCAmelCase , (Dataset, IterableDataset) ):
if isinstance(_lowerCAmelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
'is an empty dataset dictionary.' )
raise ValueError(
f"""Dataset at position {i} has at least one split: {list(_lowerCAmelCase )}\n"""
f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_lowerCAmelCase ) )}']""" )
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_lowerCAmelCase ).__name__}.""" )
if i == 0:
__lowercase , __lowercase =(
(Dataset, IterableDataset) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError(
f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , info=_lowerCAmelCase , split=_lowerCAmelCase , stopping_strategy=_lowerCAmelCase )
else:
return _interleave_iterable_datasets(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , info=_lowerCAmelCase , split=_lowerCAmelCase , stopping_strategy=_lowerCAmelCase )
def _A ( _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = 0 , ):
"""simple docstring"""
if not dsets:
raise ValueError('Unable to concatenate an empty list of datasets.' )
for i, dataset in enumerate(_lowerCAmelCase ):
if not isinstance(_lowerCAmelCase , (Dataset, IterableDataset) ):
if isinstance(_lowerCAmelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
'is an empty dataset dictionary.' )
raise ValueError(
f"""Dataset at position {i} has at least one split: {list(_lowerCAmelCase )}\n"""
f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_lowerCAmelCase ) )}']""" )
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_lowerCAmelCase ).__name__}.""" )
if i == 0:
__lowercase , __lowercase =(
(Dataset, IterableDataset) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError(
f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(_lowerCAmelCase , info=_lowerCAmelCase , split=_lowerCAmelCase , axis=_lowerCAmelCase )
else:
return _concatenate_iterable_datasets(_lowerCAmelCase , info=_lowerCAmelCase , split=_lowerCAmelCase , axis=_lowerCAmelCase )
| 166 |
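# Hedged usage sketch for `interleave_datasets` in the row above, on two tiny
# in-memory datasets ("first_exhausted" is the default stopping strategy):
from datasets import Dataset, interleave_datasets

d1 = Dataset.from_dict({"a": [0, 1, 2]})
d2 = Dataset.from_dict({"a": [10, 11, 12, 13]})
print(interleave_datasets([d1, d2])["a"])  # [0, 10, 1, 11, 2, 12]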
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class _UpperCamelCase ( A ):
'''simple docstring'''
def __lowerCamelCase ( self : int):
'''simple docstring'''
__lowercase =tempfile.mkdtemp()
__lowercase =5
# Realm tok
__lowercase =[
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'test',
'question',
'this',
'is',
'the',
'first',
'second',
'third',
'fourth',
'fifth',
'record',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__lowercase =os.path.join(self.tmpdirname , 'realm_tokenizer')
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase)
__lowercase =os.path.join(_lowerCAmelCase , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
__lowercase =os.path.join(self.tmpdirname , 'realm_block_records')
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase)
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'realm_tokenizer'))
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
__lowercase =RealmConfig(num_block_records=self.num_block_records)
return config
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
__lowercase =Dataset.from_dict(
{
'id': ['0', '1'],
'question': ['foo', 'bar'],
'answers': [['Foo', 'Bar'], ['Bar']],
})
return dataset
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__lowercase =np.array(
[
B'This is the first record',
B'This is the second record',
B'This is the third record',
B'This is the fourth record',
B'This is the fifth record',
B'This is a longer longer longer record',
] , dtype=_lowerCAmelCase , )
return block_records
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__lowercase =RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def __lowerCamelCase ( self : int):
'''simple docstring'''
__lowercase =self.get_config()
__lowercase =self.get_dummy_retriever()
__lowercase =retriever.tokenizer
__lowercase =np.array([0, 3] , dtype='long')
__lowercase =tokenizer(['Test question']).input_ids
__lowercase =tokenizer(
['the fourth'] , add_special_tokens=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , ).input_ids
__lowercase =config.reader_seq_len
__lowercase , __lowercase , __lowercase , __lowercase =retriever(
_lowerCAmelCase , _lowerCAmelCase , answer_ids=_lowerCAmelCase , max_length=_lowerCAmelCase , return_tensors='np')
self.assertEqual(len(_lowerCAmelCase) , 2)
self.assertEqual(len(_lowerCAmelCase) , 2)
self.assertEqual(len(_lowerCAmelCase) , 2)
self.assertEqual(concat_inputs.input_ids.shape , (2, 1_0))
self.assertEqual(concat_inputs.attention_mask.shape , (2, 1_0))
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 1_0))
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 1_0))
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'] , )
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
__lowercase =self.get_config()
__lowercase =self.get_dummy_retriever()
__lowercase =retriever.tokenizer
__lowercase =np.array([0, 3, 5] , dtype='long')
__lowercase =tokenizer(['Test question']).input_ids
__lowercase =tokenizer(
['the fourth', 'longer longer'] , add_special_tokens=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , ).input_ids
__lowercase =config.reader_seq_len
__lowercase , __lowercase , __lowercase , __lowercase =retriever(
_lowerCAmelCase , _lowerCAmelCase , answer_ids=_lowerCAmelCase , max_length=_lowerCAmelCase , return_tensors='np')
self.assertEqual([False, True, True] , _lowerCAmelCase)
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , _lowerCAmelCase)
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , _lowerCAmelCase)
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
__lowercase =self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , 'realm_block_records'))
# Test local path
__lowercase =retriever.from_pretrained(os.path.join(self.tmpdirname , 'realm_block_records'))
self.assertEqual(retriever.block_records[0] , B'This is the first record')
# Test mocked remote path
with patch('transformers.models.realm.retrieval_realm.hf_hub_download') as mock_hf_hub_download:
__lowercase =os.path.join(
os.path.join(self.tmpdirname , 'realm_block_records') , _REALM_BLOCK_RECORDS_FILENAME)
__lowercase =RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa')
self.assertEqual(retriever.block_records[0] , B'This is the first record')
| 166 | 1 |
"""simple docstring"""
import os
import sys
import unittest
_lowerCAmelCase : Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_lowerCAmelCase : List[Any] = os.path.join(git_repo_path, '''src''', '''diffusers''')
class A_ ( unittest.TestCase ):
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : List[str] = find_backend(" if not is_torch_available():" )
self.assertEqual(__lowerCAmelCase ,"torch" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
_lowerCamelCase : Optional[Any] = find_backend(" if not (is_torch_available() and is_transformers_available()):" )
self.assertEqual(__lowerCAmelCase ,"torch_and_transformers" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
_lowerCamelCase : Any = find_backend(
" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
self.assertEqual(__lowerCAmelCase ,"torch_and_transformers_and_onnx" )
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("torch" ,__lowerCAmelCase )
self.assertIn("torch_and_transformers" ,__lowerCAmelCase )
self.assertIn("flax_and_transformers" ,__lowerCAmelCase )
self.assertIn("torch_and_transformers_and_onnx" ,__lowerCAmelCase )
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" ,objects["torch"] )
self.assertIn("FlaxUNet2DConditionModel" ,objects["flax"] )
self.assertIn("StableDiffusionPipeline" ,objects["torch_and_transformers"] )
self.assertIn("FlaxStableDiffusionPipeline" ,objects["flax_and_transformers"] )
self.assertIn("LMSDiscreteScheduler" ,objects["torch_and_scipy"] )
self.assertIn("OnnxStableDiffusionPipeline" ,objects["torch_and_transformers_and_onnx"] )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = create_dummy_object("CONSTANT" ,"'torch'" )
self.assertEqual(__lowerCAmelCase ,"\nCONSTANT = None\n" )
_lowerCamelCase : Tuple = create_dummy_object("function" ,"'torch'" )
self.assertEqual(
__lowerCAmelCase ,"\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" )
_lowerCamelCase : Dict = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
_lowerCamelCase : Optional[int] = create_dummy_object("FakeClass" ,"'torch'" )
self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
_lowerCamelCase : List[str] = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
        self.assertEqual(dummy_files["torch"] ,__lowerCAmelCase )
| 351 |
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
_lowerCAmelCase : Optional[int] = '''__DUMMY_TRANSFORMERS_USER__'''
_lowerCAmelCase : Dict = '''Dummy User'''
_lowerCAmelCase : Optional[int] = '''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'''
_lowerCAmelCase : Tuple = '''https://hub-ci.huggingface.co'''
_lowerCAmelCase : Any = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}'''
_lowerCAmelCase : Tuple = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}'''
_lowerCAmelCase : Dict = Path('''~/.huggingface/hub_ci_token''').expanduser()
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE" , _lowerCamelCase )
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> Tuple:
'''simple docstring'''
monkeypatch.setattr("datasets.config.HF_ENDPOINT" , _lowerCamelCase )
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL" , _lowerCamelCase )
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token" , _lowerCamelCase )
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Dict:
'''simple docstring'''
HfFolder.save_token(_lowerCamelCase )
yield
HfFolder.delete_token()
@pytest.fixture(scope="session" )
def lowerCamelCase_( ) -> str:
'''simple docstring'''
return HfApi(endpoint=_lowerCamelCase )
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Dict = HfFolder.get_token()
HfFolder.save_token(_lowerCamelCase )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(_lowerCamelCase )
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
def _cleanup_repo(_lowerCamelCase ):
hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" )
return _cleanup_repo
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> List[str]:
'''simple docstring'''
@contextmanager
def _temporary_repo(_lowerCamelCase ):
try:
yield repo_id
finally:
cleanup_repo(_lowerCamelCase )
return _temporary_repo
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
_lowerCamelCase : Tuple = F"""repo_txt_data-{int(time.time() * 10e3 )}"""
_lowerCamelCase : List[str] = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase )
hf_api.upload_file(
token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data/text_data.txt" , repo_id=_lowerCamelCase , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str:
'''simple docstring'''
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str:
'''simple docstring'''
_lowerCamelCase : List[Any] = F"""repo_zipped_txt_data-{int(time.time() * 10e3 )}"""
_lowerCamelCase : Dict = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase )
hf_api.upload_file(
token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data.zip" , repo_id=_lowerCamelCase , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : Any = F"""repo_zipped_img_data-{int(time.time() * 10e3 )}"""
_lowerCamelCase : List[Any] = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase )
hf_api.upload_file(
token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data.zip" , repo_id=_lowerCamelCase , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
    return hf_private_dataset_repo_zipped_img_data_
| 340 | 0 |
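# Hedged sketch of how a test would consume one of the session fixtures above:
# pytest injects the repo id by fixture name. The fixture and test names here
# are assumptions inferred from the return values in the row, not the source.
def test_reads_private_txt_repo(hf_private_dataset_repo_txt_data):
    repo_id = hf_private_dataset_repo_txt_data
    assert repo_id.startswith(CI_HUB_USER + "/")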
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Optional[int] , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any]=1_3 , lowerCAmelCase__ : int=7 , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : str=True , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : Union[str, Any]=9_9 , lowerCAmelCase__ : List[str]=6_4 , lowerCAmelCase__ : Union[str, Any]=5 , lowerCAmelCase__ : Union[str, Any]=4 , lowerCAmelCase__ : Optional[Any]=3_7 , lowerCAmelCase__ : List[Any]="gelu" , lowerCAmelCase__ : Union[str, Any]=0.1 , lowerCAmelCase__ : Union[str, Any]=0.1 , lowerCAmelCase__ : Optional[Any]=5_1_2 , lowerCAmelCase__ : Tuple=1_6 , lowerCAmelCase__ : Tuple=2 , lowerCAmelCase__ : str=0.02 , lowerCAmelCase__ : List[str]=3 , lowerCAmelCase__ : List[Any]=4 , lowerCAmelCase__ : Dict=None , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = parent
__SCREAMING_SNAKE_CASE : Dict = batch_size
__SCREAMING_SNAKE_CASE : str = seq_length
__SCREAMING_SNAKE_CASE : Optional[Any] = is_training
__SCREAMING_SNAKE_CASE : Any = use_input_mask
__SCREAMING_SNAKE_CASE : List[Any] = use_token_type_ids
__SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
__SCREAMING_SNAKE_CASE : Any = vocab_size
__SCREAMING_SNAKE_CASE : Dict = hidden_size
__SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
__SCREAMING_SNAKE_CASE : Dict = num_attention_heads
__SCREAMING_SNAKE_CASE : int = intermediate_size
__SCREAMING_SNAKE_CASE : List[Any] = hidden_act
__SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
__SCREAMING_SNAKE_CASE : Dict = type_vocab_size
__SCREAMING_SNAKE_CASE : Tuple = type_sequence_label_size
__SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
__SCREAMING_SNAKE_CASE : int = num_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_choices
__SCREAMING_SNAKE_CASE : Dict = scope
__SCREAMING_SNAKE_CASE : int = vocab_size - 1
def UpperCamelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : List[str] = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE : int = self.get_config()
return config, input_ids, input_mask, token_labels
def UpperCamelCase__ ( self : Dict ):
"""simple docstring"""
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def UpperCamelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE : List[str] = True
return config, input_ids, input_mask, token_labels
def UpperCamelCase__ ( self : List[str] , lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = GPTNeoXModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self : int , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = True
__SCREAMING_SNAKE_CASE : Dict = GPTNeoXModel(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self : List[str] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = GPTNeoXForCausalLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Tuple = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self : Optional[int] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = self.num_labels
__SCREAMING_SNAKE_CASE : List[str] = GPTNeoXForQuestionAnswering(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ ( self : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = self.num_labels
__SCREAMING_SNAKE_CASE : str = GPTNeoXForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : List[Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self : str , lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
__SCREAMING_SNAKE_CASE : str = GPTNeoXForTokenClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Any = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self : List[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = True
__SCREAMING_SNAKE_CASE : str = GPTNeoXForCausalLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
# first forward pass
__SCREAMING_SNAKE_CASE : int = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , use_cache=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__SCREAMING_SNAKE_CASE : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
__SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__SCREAMING_SNAKE_CASE : str = torch.cat([input_ids, next_tokens] , dim=-1 )
__SCREAMING_SNAKE_CASE : Any = torch.cat([input_mask, next_mask] , dim=-1 )
__SCREAMING_SNAKE_CASE : List[Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = output_from_no_past["""hidden_states"""][0]
__SCREAMING_SNAKE_CASE : Dict = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , )["""hidden_states"""][0]
# select random slice
__SCREAMING_SNAKE_CASE : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__SCREAMING_SNAKE_CASE : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach()
__SCREAMING_SNAKE_CASE : List[str] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
def UpperCamelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = config_and_inputs
__SCREAMING_SNAKE_CASE : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
_A : int = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
_A : int = (GPTNeoXForCausalLM,) if is_torch_available() else ()
_A : Any = (
{
'''feature-extraction''': GPTNeoXModel,
'''question-answering''': GPTNeoXForQuestionAnswering,
'''text-classification''': GPTNeoXForSequenceClassification,
'''text-generation''': GPTNeoXForCausalLM,
'''token-classification''': GPTNeoXForTokenClassification,
'''zero-shot''': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
_A : Dict = False
_A : Dict = False
_A : str = False
_A : Optional[int] = False
def UpperCamelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = GPTNeoXModelTester(self )
__SCREAMING_SNAKE_CASE : Union[str, Any] = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=6_4 , num_attention_heads=8 )
def UpperCamelCase__ ( self : Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
__SCREAMING_SNAKE_CASE : Optional[int] = None
self.model_tester.create_and_check_model_as_decoder(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCAmelCase__ )
def UpperCamelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__ )
def UpperCamelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase__ )
def UpperCamelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__ )
@unittest.skip(reason="""Feed forward chunking is not implemented""" )
def UpperCamelCase__ ( self : Any ):
"""simple docstring"""
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def UpperCamelCase__ ( self : Union[str, Any] , lowerCAmelCase__ : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE : List[str] = ids_tensor([1, 1_0] , config.vocab_size )
__SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
__SCREAMING_SNAKE_CASE : Any = GPTNeoXModel(lowerCAmelCase__ )
original_model.to(lowerCAmelCase__ )
original_model.eval()
__SCREAMING_SNAKE_CASE : Any = original_model(lowerCAmelCase__ ).last_hidden_state
__SCREAMING_SNAKE_CASE : int = original_model(lowerCAmelCase__ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
__SCREAMING_SNAKE_CASE : Union[str, Any] = {"""type""": scaling_type, """factor""": 10.0}
__SCREAMING_SNAKE_CASE : Optional[int] = GPTNeoXModel(lowerCAmelCase__ )
scaled_model.to(lowerCAmelCase__ )
scaled_model.eval()
__SCREAMING_SNAKE_CASE : Optional[Any] = scaled_model(lowerCAmelCase__ ).last_hidden_state
__SCREAMING_SNAKE_CASE : Optional[Any] = scaled_model(lowerCAmelCase__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-5 ) )
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
for checkpointing in [True, False]:
__SCREAMING_SNAKE_CASE : Optional[Any] = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(lowerCAmelCase__ )
        # The hub repo was updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
__SCREAMING_SNAKE_CASE : Tuple = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"""
__SCREAMING_SNAKE_CASE : Optional[int] = model.generate(**lowerCAmelCase__ , do_sample=lowerCAmelCase__ , max_new_tokens=2_0 )
__SCREAMING_SNAKE_CASE : Dict = tokenizer.batch_decode(lowerCAmelCase__ )[0]
        self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
| 112 |
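The RoPE-scaling test in the sample above relies on two properties: linear scaling rescales positions immediately, so even short inputs diverge from the unscaled model, while dynamic NTK scaling only activates past the original maximum length, so short inputs match. A minimal sketch of the linear case, assuming the standard rotary-embedding formulation; the names here are illustrative and are not the transformers API.

import torch

def rope_angles(positions: torch.Tensor, dim: int, base: float = 10000.0,
                linear_factor: float = 1.0) -> torch.Tensor:
    # With linear scaling, positions are divided by the factor before the
    # usual theta_i = pos / base**(2i/dim) table is built, so outputs change
    # even for short sequences, exactly what the test asserts for "linear".
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))
    scaled = positions.to(torch.float32) / linear_factor
    return torch.outer(scaled, inv_freq)  # shape: (seq_len, dim // 2)

short_positions = torch.arange(10)
plain = rope_angles(short_positions, dim=64)
scaled = rope_angles(short_positions, dim=64, linear_factor=10.0)
assert not torch.allclose(plain, scaled)  # linear scaling diverges immediately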
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
UpperCamelCase__ : List[str] = logging.get_logger(__name__)
UpperCamelCase__ : Tuple = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
UpperCamelCase__ : Tuple = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
UpperCamelCase__ : List[Any] = {
'''allenai/led-base-16384''': 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowerCAmelCase_ ( ):
__SCREAMING_SNAKE_CASE : Tuple = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
__SCREAMING_SNAKE_CASE : Any = bs[:]
__SCREAMING_SNAKE_CASE : Tuple = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_lowerCamelCase )
cs.append(2**8 + n )
n += 1
__SCREAMING_SNAKE_CASE : Union[str, Any] = [chr(_lowerCamelCase ) for n in cs]
return dict(zip(_lowerCamelCase , _lowerCamelCase ) )
def lowerCAmelCase_ ( _lowerCamelCase: Optional[int] ):
__SCREAMING_SNAKE_CASE : Dict = set()
__SCREAMING_SNAKE_CASE : Optional[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__SCREAMING_SNAKE_CASE : str = char
return pairs
class _UpperCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
_A : Union[str, Any] = VOCAB_FILES_NAMES
_A : Any = PRETRAINED_VOCAB_FILES_MAP
_A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : Optional[Any] = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any]="replace" , lowerCAmelCase__ : Dict="<s>" , lowerCAmelCase__ : List[str]="</s>" , lowerCAmelCase__ : Tuple="</s>" , lowerCAmelCase__ : Tuple="<s>" , lowerCAmelCase__ : Union[str, Any]="<unk>" , lowerCAmelCase__ : Union[str, Any]="<pad>" , lowerCAmelCase__ : int="<mask>" , lowerCAmelCase__ : str=False , **lowerCAmelCase__ : int , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else bos_token
__SCREAMING_SNAKE_CASE : List[str] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else eos_token
__SCREAMING_SNAKE_CASE : List[str] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else sep_token
__SCREAMING_SNAKE_CASE : Dict = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else cls_token
__SCREAMING_SNAKE_CASE : Optional[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else unk_token
__SCREAMING_SNAKE_CASE : Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
__SCREAMING_SNAKE_CASE : Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
super().__init__(
errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , )
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as vocab_handle:
__SCREAMING_SNAKE_CASE : str = json.load(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = {v: k for k, v in self.encoder.items()}
__SCREAMING_SNAKE_CASE : Dict = errors # how to handle errors in decoding
__SCREAMING_SNAKE_CASE : Union[str, Any] = bytes_to_unicode()
__SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as merges_handle:
__SCREAMING_SNAKE_CASE : Optional[Any] = merges_handle.read().split("""\n""" )[1:-1]
__SCREAMING_SNAKE_CASE : int = [tuple(merge.split() ) for merge in bpe_merges]
__SCREAMING_SNAKE_CASE : Optional[int] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
__SCREAMING_SNAKE_CASE : int = {}
__SCREAMING_SNAKE_CASE : Any = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__SCREAMING_SNAKE_CASE : str = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def UpperCamelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
return len(self.encoder )
def UpperCamelCase__ ( self : Any ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase__ ( self : Dict , lowerCAmelCase__ : Any ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
__SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = get_pairs(lowerCAmelCase__ )
if not pairs:
return token
while True:
__SCREAMING_SNAKE_CASE : Union[str, Any] = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = bigram
__SCREAMING_SNAKE_CASE : List[Any] = []
__SCREAMING_SNAKE_CASE : Optional[int] = 0
while i < len(lowerCAmelCase__ ):
try:
__SCREAMING_SNAKE_CASE : Dict = word.index(lowerCAmelCase__ , lowerCAmelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__SCREAMING_SNAKE_CASE : Dict = j
if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__SCREAMING_SNAKE_CASE : Tuple = tuple(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = new_word
if len(lowerCAmelCase__ ) == 1:
break
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = get_pairs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = """ """.join(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = word
return word
def UpperCamelCase__ ( self : Optional[Any] , lowerCAmelCase__ : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = []
for token in re.findall(self.pat , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Any = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__ ).split(""" """ ) )
return bpe_tokens
def UpperCamelCase__ ( self : List[str] , lowerCAmelCase__ : List[str] ):
"""simple docstring"""
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token ) )
def UpperCamelCase__ ( self : int , lowerCAmelCase__ : Optional[int] ):
"""simple docstring"""
return self.decoder.get(lowerCAmelCase__ )
def UpperCamelCase__ ( self : Optional[Any] , lowerCAmelCase__ : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = """""".join(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def UpperCamelCase__ ( self : List[str] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__SCREAMING_SNAKE_CASE : int = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ ) + """\n""" )
__SCREAMING_SNAKE_CASE : Tuple = 0
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase__ : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""" )
__SCREAMING_SNAKE_CASE : List[Any] = token_index
writer.write(""" """.join(lowerCAmelCase__ ) + """\n""" )
index += 1
return vocab_file, merge_file
def UpperCamelCase__ ( self : str , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__SCREAMING_SNAKE_CASE : List[Any] = [self.cls_token_id]
__SCREAMING_SNAKE_CASE : str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase__ ( self : Any , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def UpperCamelCase__ ( self : Any , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = [self.sep_token_id]
__SCREAMING_SNAKE_CASE : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase__ ( self : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any]=False , **lowerCAmelCase__ : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()):
__SCREAMING_SNAKE_CASE : int = """ """ + text
return (text, kwargs)
def UpperCamelCase__ ( self : Optional[Any] , lowerCAmelCase__ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[bool] = None , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = super()._pad(
encoded_inputs=lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding_strategy=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , )
# Load from model defaults
if return_attention_mask is None:
__SCREAMING_SNAKE_CASE : Tuple = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__SCREAMING_SNAKE_CASE : str = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
__SCREAMING_SNAKE_CASE : str = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCAmelCase__ )
if needs_to_be_padded:
__SCREAMING_SNAKE_CASE : Dict = len(lowerCAmelCase__ ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
__SCREAMING_SNAKE_CASE : Dict = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
        return encoded_inputs
| 112 | 1 |
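The LED-specific part of the tokenizer above is the `_pad` override, which pads `global_attention_mask` with -1 rather than 0, because 0 already means "local attention". A standalone sketch of that rule, with descriptive names in place of the sample's placeholder binders.

def pad_global_attention_mask(mask: list, target_len: int, padding_side: str = "right") -> list:
    # LED marks global-attention tokens with 1 and local ones with 0, so the
    # padding value must be -1: padding with 0 would silently mean "local attention".
    diff = target_len - len(mask)
    if diff <= 0:
        return list(mask)
    if padding_side == "right":
        return list(mask) + [-1] * diff
    if padding_side == "left":
        return [-1] * diff + list(mask)
    raise ValueError("Invalid padding strategy: " + padding_side)

assert pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]
assert pad_global_attention_mask([1, 0, 0], 5, "left") == [-1, -1, 1, 0, 0]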
"""simple docstring"""
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
snake_case_ = logging.get_logger(__name__)
@dataclass
class A_ :
"""simple docstring"""
def __init__( self :str , lowercase_ :Optional[int]=False , lowercase_ :Optional[Any]=False , lowercase_ :str=6.0 , lowercase_ :Optional[Any]=None , lowercase_ :Any=False , lowercase_ :int=False , lowercase_ :Tuple=None , lowercase_ :str="fp4" , lowercase_ :Optional[int]=False , **lowercase_ :Any , ) -> Dict:
UpperCAmelCase = load_in_abit
UpperCAmelCase = load_in_abit
UpperCAmelCase = llm_inta_threshold
UpperCAmelCase = llm_inta_skip_modules
UpperCAmelCase = llm_inta_enable_fpaa_cpu_offload
UpperCAmelCase = llm_inta_has_fpaa_weight
UpperCAmelCase = bnb_abit_quant_type
UpperCAmelCase = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
UpperCAmelCase = torch.floataa
elif isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase = getattr(lowercase_ , lowercase_ )
elif isinstance(lowercase_ , torch.dtype ):
UpperCAmelCase = bnb_abit_compute_dtype
else:
raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' )
self.post_init()
def UpperCAmelCase__ ( self :Any ) -> Any:
if not isinstance(self.llm_inta_threshold , lowercase_ ):
raise ValueError('llm_int8_threshold must be a float' )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , lowercase_ ):
raise ValueError('llm_int8_skip_modules must be a list of strings' )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , lowercase_ ):
raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' )
if not isinstance(self.llm_inta_has_fpaa_weight , lowercase_ ):
raise ValueError('llm_int8_has_fp16_weight must be a boolean' )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' )
if not isinstance(self.bnb_abit_quant_type , lowercase_ ):
raise ValueError('bnb_4bit_quant_type must be a string' )
if not isinstance(self.bnb_abit_use_double_quant , lowercase_ ):
raise ValueError('bnb_4bit_use_double_quant must be a boolean' )
if self.load_in_abit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse(
'0.39.0' ):
raise ValueError(
'4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' )
def UpperCAmelCase__ ( self :Dict ) -> Tuple:
return self.load_in_abit or self.load_in_abit
def UpperCAmelCase__ ( self :Any ) -> Any:
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def UpperCAmelCase__ ( cls :List[str] , lowercase_ :int , lowercase_ :Optional[Any] , **lowercase_ :Union[str, Any] ) -> Optional[int]:
UpperCAmelCase = cls(**lowercase_ )
UpperCAmelCase = []
for key, value in kwargs.items():
if hasattr(lowercase_ , lowercase_ ):
setattr(lowercase_ , lowercase_ , lowercase_ )
to_remove.append(lowercase_ )
for key in to_remove:
kwargs.pop(lowercase_ , lowercase_ )
if return_unused_kwargs:
return config, kwargs
else:
return config
def UpperCAmelCase__ ( self :str , lowercase_ :Union[str, os.PathLike] ) -> Any:
with open(lowercase_ , 'w' , encoding='utf-8' ) as writer:
UpperCAmelCase = self.to_dict()
UpperCAmelCase = json.dumps(lowercase_ , indent=2 , sort_keys=lowercase_ ) + '\n'
writer.write(lowercase_ )
def UpperCAmelCase__ ( self :Optional[Any] ) -> Dict[str, Any]:
UpperCAmelCase = copy.deepcopy(self.__dict__ )
UpperCAmelCase = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1]
return output
def __repr__( self :Union[str, Any] ) -> int:
return f"""{self.__class__.__name__} {self.to_json_string()}"""
def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :bool = True ) -> str:
if use_diff is True:
UpperCAmelCase = self.to_diff_dict()
else:
UpperCAmelCase = self.to_dict()
return json.dumps(lowercase_ , indent=2 , sort_keys=lowercase_ ) + "\n"
def UpperCAmelCase__ ( self :Union[str, Any] ) -> Dict[str, Any]:
UpperCAmelCase = self.to_dict()
# get the default config dict
UpperCAmelCase = BitsAndBytesConfig().to_dict()
UpperCAmelCase = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
UpperCAmelCase = value
return serializable_config_dict
| 359 |
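The config class above serializes only fields that differ from a freshly constructed default instance (`to_diff_dict`), which keeps saved configs small. A minimal sketch of that pattern using a plain dataclass; the field names are borrowed from the readable bitsandbytes config for illustration, and this class is not the transformers one.

import json
from dataclasses import dataclass, fields

@dataclass
class QuantConfig:
    load_in_8bit: bool = False
    llm_int8_threshold: float = 6.0
    bnb_4bit_quant_type: str = "fp4"

def to_diff_dict(cfg: QuantConfig) -> dict:
    # Compare against a default instance and keep only fields whose values differ.
    defaults = QuantConfig()
    return {
        f.name: getattr(cfg, f.name)
        for f in fields(cfg)
        if getattr(cfg, f.name) != getattr(defaults, f.name)
    }

print(json.dumps(to_diff_dict(QuantConfig(load_in_8bit=True)), indent=2, sort_keys=True))
# -> {"load_in_8bit": true}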
"""simple docstring"""
from collections import deque
class A_ :
"""simple docstring"""
def __init__( self :Any , lowercase_ :str , lowercase_ :int , lowercase_ :int ) -> None:
UpperCAmelCase = process_name # process name
UpperCAmelCase = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
UpperCAmelCase = arrival_time
UpperCAmelCase = burst_time # remaining burst time
UpperCAmelCase = 0 # total time of the process wait in ready queue
UpperCAmelCase = 0 # time from arrival time to completion time
class A_ :
"""simple docstring"""
def __init__( self :Any , lowercase_ :int , lowercase_ :list[int] , lowercase_ :deque[Process] , lowercase_ :int , ) -> None:
# total number of mlfq's queues
UpperCAmelCase = number_of_queues
# time slice of queues that round robin algorithm applied
UpperCAmelCase = time_slices
# unfinished process is in this ready_queue
UpperCAmelCase = queue
# current time
UpperCAmelCase = current_time
# finished process is in this sequence queue
UpperCAmelCase = deque()
def UpperCAmelCase__ ( self :Optional[int] ) -> list[str]:
UpperCAmelCase = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def UpperCAmelCase__ ( self :List[str] , lowercase_ :list[Process] ) -> list[int]:
UpperCAmelCase = []
for i in range(len(lowercase_ ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def UpperCAmelCase__ ( self :List[str] , lowercase_ :list[Process] ) -> list[int]:
UpperCAmelCase = []
for i in range(len(lowercase_ ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def UpperCAmelCase__ ( self :Dict , lowercase_ :list[Process] ) -> list[int]:
UpperCAmelCase = []
for i in range(len(lowercase_ ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def UpperCAmelCase__ ( self :str , lowercase_ :deque[Process] ) -> list[int]:
return [q.burst_time for q in queue]
def UpperCAmelCase__ ( self :int , lowercase_ :Process ) -> int:
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :deque[Process] ) -> deque[Process]:
UpperCAmelCase = deque() # sequence deque of finished process
while len(lowercase_ ) != 0:
UpperCAmelCase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
# update waiting time of current process
self.update_waiting_time(lowercase_ )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
UpperCAmelCase = 0
# set the process's turnaround time because it is finished
UpperCAmelCase = self.current_time - cp.arrival_time
# set the completion time
UpperCAmelCase = self.current_time
# add the process to queue that has finished queue
finished.append(lowercase_ )
self.finish_queue.extend(lowercase_ ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def UpperCAmelCase__ ( self :Tuple , lowercase_ :deque[Process] , lowercase_ :int ) -> tuple[deque[Process], deque[Process]]:
UpperCAmelCase = deque() # sequence deque of terminated process
        # run for just one cycle; unfinished processes go back into the queue
for _ in range(len(lowercase_ ) ):
UpperCAmelCase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(lowercase_ )
            # if the process's remaining burst time exceeds the time slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
UpperCAmelCase = self.current_time
                # put the process at the back of the queue because it is not finished
ready_queue.append(lowercase_ )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
UpperCAmelCase = 0
# set the finish time
UpperCAmelCase = self.current_time
# update the process' turnaround time because it is finished
UpperCAmelCase = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(lowercase_ )
self.finish_queue.extend(lowercase_ ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def UpperCAmelCase__ ( self :Optional[Any] ) -> deque[Process]:
        # every queue except the last one uses the round-robin algorithm
for i in range(self.number_of_queues - 1 ):
UpperCAmelCase , UpperCAmelCase = self.round_robin(
self.ready_queue , self.time_slices[i] )
        # the last queue uses the first-come-first-served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
snake_case_ = Process("""P1""", 0, 53)
snake_case_ = Process("""P2""", 0, 17)
snake_case_ = Process("""P3""", 0, 68)
snake_case_ = Process("""P4""", 0, 24)
snake_case_ = 3
snake_case_ = [17, 25]
snake_case_ = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"""queue""": deque([Pa, Pa, Pa, Pa])})
snake_case_ = Process("""P1""", 0, 53)
snake_case_ = Process("""P2""", 0, 17)
snake_case_ = Process("""P3""", 0, 68)
snake_case_ = Process("""P4""", 0, 24)
snake_case_ = 3
snake_case_ = [17, 25]
snake_case_ = deque([Pa, Pa, Pa, Pa])
snake_case_ = MLFQ(number_of_queues, time_slices, queue, 0)
snake_case_ = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f'''waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print completion times of processes(P1, P2, P3, P4)
print(
f'''completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f'''turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print sequence of finished processes
print(
f'''sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'''
)
| 181 | 0 |
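The scheduler above runs round-robin passes with growing time slices and finishes any leftovers first-come-first-served. A self-contained sketch of one round-robin pass over (name, remaining_burst) pairs, mirroring the `round_robin` method; it is a simplification that ignores arrival times and waiting-time bookkeeping.

from collections import deque

def round_robin_once(ready: deque, time_slice: int, now: int = 0):
    # One pass over the ready queue: each process gets at most `time_slice`
    # units of CPU; unfinished work goes to the back of the queue.
    finished = []
    for _ in range(len(ready)):
        name, remaining = ready.popleft()
        if remaining > time_slice:
            now += time_slice
            ready.append((name, remaining - time_slice))
        else:
            now += remaining
            finished.append((name, now))  # record the completion time
    return finished, ready, now

done, rest, now = round_robin_once(deque([("P1", 53), ("P2", 17)]), time_slice=17)
assert done == [("P2", 34)] and list(rest) == [("P1", 36)]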
'''simple docstring'''
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def __magic_name__( lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = args.log_outputs
__lowerCAmelCase = '''_'''.join(args.dataset.split('''/''') + [args.config, args.split])
# load metric
__lowerCAmelCase = load_metric('''wer''')
__lowerCAmelCase = load_metric('''cer''')
# compute metrics
__lowerCAmelCase = wer.compute(references=result['''target'''], predictions=result['''prediction'''])
__lowerCAmelCase = cer.compute(references=result['''target'''], predictions=result['''prediction'''])
# print & log results
__lowerCAmelCase = F"""WER: {wer_result}\nCER: {cer_result}"""
print(lowerCAmelCase__)
with open(F"""{dataset_id}_eval_results.txt""", '''w''') as f:
f.write(lowerCAmelCase__)
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
__lowerCAmelCase = F"""log_{dataset_id}_predictions.txt"""
__lowerCAmelCase = F"""log_{dataset_id}_targets.txt"""
with open(lowerCAmelCase__, '''w''') as p, open(lowerCAmelCase__, '''w''') as t:
# mapping function to write output
def write_to_file(lowerCamelCase, lowerCamelCase):
p.write(F"""{i}""" + '''\n''')
p.write(batch['''prediction'''] + '''\n''')
t.write(F"""{i}""" + '''\n''')
t.write(batch['''target'''] + '''\n''')
result.map(lowerCAmelCase__, with_indices=lowerCAmelCase__)
def __magic_name__( lowerCamelCase):
__lowerCAmelCase = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
__lowerCAmelCase = re.sub(lowerCAmelCase__, '''''', text.lower())
    # In addition, normalize the target text, e.g. by removing newline characters.
# note that order is important here!
__lowerCAmelCase = ['''\n\n''', '''\n''', ''' ''', ''' ''']
for t in token_sequences_to_ignore:
__lowerCAmelCase = ''' '''.join(text.split(lowerCAmelCase__))
return text
def __magic_name__( lowerCamelCase):
# load dataset
__lowerCAmelCase = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=lowerCAmelCase__)
    # for testing: only process the first few examples
    # dataset = dataset.select(range(10))
# load processor
__lowerCAmelCase = AutoFeatureExtractor.from_pretrained(args.model_id)
__lowerCAmelCase = feature_extractor.sampling_rate
# resample audio
__lowerCAmelCase = dataset.cast_column('''audio''', Audio(sampling_rate=lowerCAmelCase__))
# load eval pipeline
if args.device is None:
__lowerCAmelCase = 0 if torch.cuda.is_available() else -1
__lowerCAmelCase = pipeline('''automatic-speech-recognition''', model=args.model_id, device=args.device)
# map function to decode audio
def map_to_pred(lowerCamelCase):
__lowerCAmelCase = asr(
batch['''audio''']['''array'''], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)
__lowerCAmelCase = prediction['''text''']
__lowerCAmelCase = normalize_text(batch['''sentence'''])
return batch
# run inference on all examples
__lowerCAmelCase = dataset.map(lowerCAmelCase__, remove_columns=dataset.column_names)
# compute and log_results
# do not change function below
log_results(lowerCAmelCase__, lowerCAmelCase__)
if __name__ == "__main__":
_UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
_UpperCAmelCase : Union[str, Any] = parser.parse_args()
main(args)
| 174 |
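WER/CER scores in the script above are only meaningful if predictions and references pass through the same normalization. A standalone sketch of the `normalize_text` step; the ignore-pattern here is abridged to common punctuation, and the script's exact character class should be kept in practice.

import re

CHARS_TO_IGNORE = r'[,?.!\-;:"“”‘’…–—%]'  # abridged: punctuation ignored in scoring

def normalize_text(text: str) -> str:
    # Lower-case, strip scoring-irrelevant punctuation, and collapse all
    # whitespace runs (including newlines) to single spaces.
    text = re.sub(CHARS_TO_IGNORE, "", text.lower())
    return " ".join(text.split())

assert normalize_text("Hello,  WORLD!\nHow are you?") == "hello world how are you"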
"""simple docstring"""
lowercase_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def lowercase ( lowerCAmelCase__ : bytes ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(lowerCAmelCase__ )
__a = ''''''.join(bin(lowerCAmelCase__ )[2:].zfill(8 ) for byte in data )
__a = len(lowerCAmelCase__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
__a = b'''=''' * ((6 - len(lowerCAmelCase__ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(lowerCAmelCase__ ) % 6)
else:
__a = b''''''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(lowerCAmelCase__ ) , 6 ) ).encode()
+ padding
)
def lowercase ( lowerCAmelCase__ : str ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = (
'''argument should be a bytes-like object or ASCII string, '''
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(lowerCAmelCase__ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
try:
__a = encoded_data.decode('''utf-8''' )
except UnicodeDecodeError:
raise ValueError('''base64 encoded data should only contain ASCII characters''' )
__a = encoded_data.count('''=''' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(lowerCAmelCase__ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
__a = encoded_data[:-padding]
__a = ''''''.join(
bin(B64_CHARSET.index(lowerCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
__a = ''''''.join(
bin(B64_CHARSET.index(lowerCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data )
__a = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(lowerCAmelCase__ ) , 8 )
]
return bytes(lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 | 0 |
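The encoder above works by viewing the input as one long bit string, regrouping it into 6-bit indices into the 64-character alphabet, and appending one `=` per two bits of zero padding. A compact re-derivation of the same idea, cross-checked against the standard library.

import base64

def b64encode_manual(data: bytes) -> bytes:
    # Zero-pad the bit string up to a multiple of 6, then map each 6-bit
    # group to a character of the base64 alphabet.
    charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
    bits = "".join(bin(byte)[2:].zfill(8) for byte in data)
    pad = (-len(bits)) % 6
    bits += "0" * pad
    encoded = "".join(charset[int(bits[i : i + 6], 2)] for i in range(0, len(bits), 6))
    return (encoded + "=" * (pad // 2)).encode()  # one '=' per two padding bits

for sample in [b"", b"f", b"fo", b"foo", b"foobar"]:
    assert b64encode_manual(sample) == base64.b64encode(sample)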
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = ["pixel_values"]
def __init__( self : Any , __A : bool = True , __A : int = 3_2 , __A : Tuple=PILImageResampling.BILINEAR , __A : bool = True , **__A : Optional[Any] , ):
snake_case__ : List[str] = do_resize
snake_case__ : Optional[Any] = do_rescale
snake_case__ : int = size_divisor
snake_case__ : int = resample
super().__init__(**__A )
def _lowercase ( self : List[str] , __A : np.ndarray , __A : int , __A : Union[str, Any] , __A : Optional[ChannelDimension] = None , **__A : Any ):
snake_case__ : Optional[Any] = get_image_size(__A )
# Rounds the height and width down to the closest multiple of size_divisor
snake_case__ : Optional[int] = height // size_divisor * size_divisor
snake_case__ : Any = width // size_divisor * size_divisor
snake_case__ : Optional[Any] = resize(__A , (new_h, new_w) , resample=__A , data_format=__A , **__A )
return image
def _lowercase ( self : Any , __A : np.ndarray , __A : float , __A : Optional[ChannelDimension] = None , **__A : List[str] ):
return rescale(image=__A , scale=__A , data_format=__A , **__A )
def _lowercase ( self : Union[str, Any] , __A : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , __A : Optional[bool] = None , __A : Optional[int] = None , __A : Dict=None , __A : Optional[bool] = None , __A : Optional[Union[TensorType, str]] = None , __A : ChannelDimension = ChannelDimension.FIRST , **__A : Tuple , ):
snake_case__ : str = do_resize if do_resize is not None else self.do_resize
snake_case__ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
snake_case__ : List[str] = size_divisor if size_divisor is not None else self.size_divisor
snake_case__ : List[Any] = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError("size_divisor is required for resizing" )
snake_case__ : Dict = make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError("Invalid image(s)" )
# All transformations expect numpy arrays.
snake_case__ : int = [to_numpy_array(__A ) for img in images]
if do_resize:
snake_case__ : Tuple = [self.resize(__A , size_divisor=__A , resample=__A ) for image in images]
if do_rescale:
snake_case__ : str = [self.rescale(__A , scale=1 / 2_5_5 ) for image in images]
snake_case__ : Dict = [to_channel_dimension_format(__A , __A ) for image in images]
snake_case__ : int = {"pixel_values": images}
return BatchFeature(data=__A , tensor_type=__A )
| 358 |
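The resize step in the image processor above first snaps height and width down to the nearest multiple of `size_divisor`, which stride- or patch-based backbones typically require. A tiny sketch of that rounding rule on its own.

def snap_to_multiple(height: int, width: int, size_divisor: int) -> tuple:
    # Round both spatial dimensions down to the nearest multiple of
    # size_divisor, as the resize step does before interpolating.
    return height // size_divisor * size_divisor, width // size_divisor * size_divisor

assert snap_to_multiple(479, 641, 32) == (448, 640)
assert snap_to_multiple(224, 224, 32) == (224, 224)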
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase : Any = {
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[Any] = [
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
__lowerCamelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 286 | 0 |
from __future__ import annotations
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Tuple: # noqa: E741
while r - l > 1:
lowercase : Dict = (l + r) // 2
if v[m] >= key:
lowercase : Any = m
else:
lowercase : Optional[Any] = m # noqa: E741
return r
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> int:
if len(SCREAMING_SNAKE_CASE__ ) == 0:
return 0
lowercase : str = [0] * len(SCREAMING_SNAKE_CASE__ )
lowercase : Dict = 1
lowercase : int = v[0]
for i in range(1 , len(SCREAMING_SNAKE_CASE__ ) ):
if v[i] < tail[0]:
lowercase : List[Any] = v[i]
elif v[i] > tail[length - 1]:
lowercase : str = v[i]
length += 1
else:
lowercase : List[Any] = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
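The sample above computes the length of the longest strictly increasing subsequence with the classic tails technique: for each possible length, keep the smallest tail value seen so far, and binary-search the slot each new element can tighten. The same algorithm follows, with `bisect_left` standing in for the sample's manual lower-bound search.

from bisect import bisect_left

def lis_length(values: list) -> int:
    # tails[k] holds the smallest possible tail of an increasing subsequence
    # of length k + 1; each element either extends tails or tightens an entry.
    tails = []
    for v in values:
        i = bisect_left(tails, v)
        if i == len(tails):
            tails.append(v)
        else:
            tails[i] = v
    return len(tails)

assert lis_length([10, 9, 2, 5, 3, 7, 101, 18]) == 4  # e.g. 2, 3, 7, 18
assert lis_length([]) == 0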
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __snake_case ( unittest.TestCase ):
def __init__( self ,snake_case ,snake_case=7 ,snake_case=3 ,snake_case=18 ,snake_case=30 ,snake_case=400 ,snake_case=True ,snake_case=None ,snake_case=True ,snake_case=None ,):
'''simple docstring'''
lowercase : Dict = size if size is not None else {"""shortest_edge""": 20}
lowercase : Union[str, Any] = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
lowercase : str = parent
lowercase : int = batch_size
lowercase : str = num_channels
lowercase : int = image_size
lowercase : List[str] = min_resolution
lowercase : str = max_resolution
lowercase : Dict = do_resize
lowercase : Dict = size
lowercase : Dict = do_center_crop
lowercase : str = crop_size
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __snake_case ( lowerCAmelCase , unittest.TestCase ):
_a : Any= MobileNetVaImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[Any] = MobileNetVaImageProcessingTester(self )
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case ,"""do_resize""" ) )
self.assertTrue(hasattr(snake_case ,"""size""" ) )
self.assertTrue(hasattr(snake_case ,"""do_center_crop""" ) )
self.assertTrue(hasattr(snake_case ,"""crop_size""" ) )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} )
lowercase : int = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case ,Image.Image )
# Test not batched input
lowercase : Dict = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
lowercase : Tuple = image_processing(snake_case ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case ,numpify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case ,np.ndarray )
# Test not batched input
lowercase : Optional[Any] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
lowercase : List[str] = image_processing(snake_case ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase : Tuple = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case ,torchify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case ,torch.Tensor )
# Test not batched input
lowercase : Optional[int] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
lowercase : List[str] = image_processing(snake_case ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
| 20 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( __snake_case : int , __snake_case : int ) -> int:
return int(input_a == input_a == 0 )
def _lowerCAmelCase ( ) -> None:
print('Truth Table of NOR Gate:' )
print('| Input 1 | Input 2 | Output |' )
print(f'| 0 | 0 | {nor_gate(0 , 0 )} |' )
print(f'| 0 | 1 | {nor_gate(0 , 1 )} |' )
print(f'| 1 | 0 | {nor_gate(1 , 0 )} |' )
print(f'| 1 | 1 | {nor_gate(1 , 1 )} |' )
if __name__ == "__main__":
import doctest
doctest.testmod()
    main()
| 365 |
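NOR is functionally complete, so the gate above is enough to build NOT, OR, and AND. A short sketch, assuming the same 0/1 integer convention as the sample.

def nor(a: int, b: int) -> int:
    return int(a == b == 0)

def not_(a: int) -> int:
    return nor(a, a)

def or_(a: int, b: int) -> int:
    return not_(nor(a, b))

def and_(a: int, b: int) -> int:
    return nor(not_(a), not_(b))

assert [not_(a) for a in (0, 1)] == [1, 0]
assert [or_(a, b) for a in (0, 1) for b in (0, 1)] == [0, 1, 1, 1]
assert [and_(a, b) for a in (0, 1) for b in (0, 1)] == [0, 0, 0, 1]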
'''simple docstring'''
import itertools
import math
def _lowerCAmelCase ( __snake_case : int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__snake_case ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _lowerCAmelCase ( ) -> List[Any]:
__A : Optional[Any] = 2
while True:
if is_prime(__snake_case ):
yield num
num += 1
def _lowerCAmelCase ( __snake_case : int = 1_00_01 ) -> int:
return next(itertools.islice(prime_generator() , nth - 1 , __snake_case ) )
if __name__ == "__main__":
    print(f"""{solution() = }""")
| 190 | 0 |
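The generator above tests candidates with trial division accelerated by the 6k ± 1 observation: every prime greater than 3 sits next to a multiple of 6. A readable version of that check, cross-validated against a sieve of Eratosthenes.

def is_prime(n: int) -> bool:
    # Trial division with the 6k +/- 1 optimization used in the sample above.
    if n < 2:
        return False
    if n < 4:
        return True  # 2 and 3 are prime
    if n % 2 == 0 or n % 3 == 0:
        return False
    i = 5
    while i * i <= n:
        if n % i == 0 or n % (i + 2) == 0:
            return False
        i += 6
    return True

# Cross-check every value up to the limit against a simple sieve.
limit = 1_000
sieve = [False, False] + [True] * (limit - 1)
for p in range(2, int(limit ** 0.5) + 1):
    if sieve[p]:
        sieve[p * p :: p] = [False] * len(sieve[p * p :: p])
assert all(is_prime(n) == sieve[n] for n in range(limit + 1))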