| code (string, lengths 86–54.5k) | code_codestyle (int64, 0–371) | style_context (string, lengths 87–49.2k) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because Lightning requires it; the module is used purely as a weight container
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--longformer_model',
default=None,
type=str,
required=True,
help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
)
parser.add_argument(
'--longformer_question_answering_ckpt_path',
default=None,
type=str,
required=True,
help='Path to the official PyTorch Lightning checkpoint.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
) | 308 |
import os
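# Minimal path sum with moves up, down, and right only (the classic "three ways"
# problem, Project Euler 82). The solution below sweeps column by column: a
# rightward pass seeds each column from its left neighbour, then a downward and
# an upward pass relax each cell against vertical moves, so every cell ends up
# holding the cheapest cost of reaching it from the first column.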
def snake_case( __magic_name__ = "input.txt" ) -> int:
'''simple docstring'''
with open(os.path.join(os.path.dirname(__magic_name__ ) , __magic_name__ ) ) as input_file:
lowercase : Any = [
[int(__magic_name__ ) for element in line.split(''',''' )]
for line in input_file.readlines()
]
lowercase : List[Any] = len(__magic_name__ )
lowercase : Any = len(matrix[0] )
lowercase : Tuple = [[-1 for _ in range(__magic_name__ )] for _ in range(__magic_name__ )]
for i in range(__magic_name__ ):
lowercase : str = matrix[i][0]
for j in range(1 , __magic_name__ ):
for i in range(__magic_name__ ):
lowercase : Any = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , __magic_name__ ):
lowercase : Any = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
lowercase : Any = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'''{solution() = }''') | 308 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class SwitchTransformersConfig(PretrainedConfig):
    """Configuration class to store the configuration of a [`SwitchTransformersModel`]."""

    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32_128,
        d_model=768,
        d_kv=64,
        d_ff=2_048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us how many encoder layers come between consecutive sparse layers.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us how many decoder layers come between consecutive sparse layers.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
| 352 |
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
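# The tester below follows the standard transformers test layout: a plain helper
# class builds a tiny random config plus inputs, and one `create_and_check_*`
# method per model head runs a forward pass and verifies output shapes.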
class GPTNeoXModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))


@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")

            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
| 277 | 0 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost
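# AStar keeps a sorted open list of frontier nodes and a closed list of already
# expanded ones; each iteration pops the node with the lowest f = g + h, so with
# an admissible heuristic the first time the target is popped the path is optimal.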
class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
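# BidirectionalAStar runs two AStar searches in lockstep, one from start to goal
# and one from goal to start, retargeting each search at the other's current
# node; when the two frontiers meet, the forward path and the reversed backward
# path are stitched together.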
class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_lowerCAmelCase : List[Any] = (0, 0)
_lowerCAmelCase : Optional[int] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_lowerCAmelCase : Dict = time.time()
_lowerCAmelCase : Union[str, Any] = AStar(init, goal)
_lowerCAmelCase : Optional[Any] = a_star.search()
_lowerCAmelCase : Optional[Any] = time.time() - start_time
print(F'''AStar execution time = {end_time:f} seconds''')
_lowerCAmelCase : Union[str, Any] = time.time()
_lowerCAmelCase : List[str] = BidirectionalAStar(init, goal)
_lowerCAmelCase : List[str] = time.time() - bd_start_time
print(F'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
| 169 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
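# Fast pipeline test: tiny randomly-initialised components and only two
# inference steps keep the shared PipelineTesterMixin checks (save/load,
# batching, attention slicing) cheap enough to run on CPU.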
@skip_mps
class IFImgaImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImgaImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 141 | 0 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
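# This launcher mirrors torch.distributed.launch for TPUs: it imports the
# training script as a module and hands its `_mp_fn` entry point to
# xla_multiprocessing.spawn, one process per requested TPU core.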
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
| 241 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 241 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10_000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs,
| 305 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 87 | 0 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 360 |
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm: repeatedly replace (a, b) with (b, a % b)."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    """Print example GCDs."""
print(F"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
print(F"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
print(F"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
print(F"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
print(F"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
print(F"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
print(F"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
print(F"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
print(F"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
print(F"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
main()
| 120 | 0 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}

SPIECE_UNDERLINE = "▁"
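# T5 reserves `extra_ids` sentinel tokens (<extra_id_0> ... <extra_id_99>) at the
# top of the vocabulary; they mark masked spans during span-corruption
# pre-training, and the id <-> token conversions below map them onto the last
# `extra_ids` positions of the embedding matrix.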
class T5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Do not add eos again if user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 91 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 91 | 1 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
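# Pipeline stages: `preprocess` turns the image and candidate labels into model
# tensors (one text prompt per label via the hypothesis template), `_forward`
# runs CLIP-style image/text matching, and `postprocess` softmaxes the
# image-text logits into per-label scores.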
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result | 366 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
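# These tests check that the framework-agnostic tensor helpers in
# transformers.utils (flatten_dict, transpose, reshape, squeeze, expand_dims)
# agree with their NumPy counterparts for NumPy, PyTorch, TensorFlow and JAX inputs.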
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
lowerCAmelCase__ : int = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 128,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 142,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(a ) , a )
    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
| 307 | 0 |
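# --- Added usage sketch (not part of the original sample). The tests above
# exercise transformers' framework-agnostic tensor helpers; this shows the same
# calls at module level on plain NumPy arrays. The import path below is assumed
# to be transformers.utils.generic, where these helpers are defined.
import numpy as np
from transformers.utils.generic import expand_dims, reshape, squeeze, transpose

x = np.random.randn(1, 3, 4)
assert transpose(x, axes=(2, 1, 0)).shape == (4, 3, 1)
assert reshape(x, (3, 4)).shape == (3, 4)
assert squeeze(x).shape == (3, 4)                 # drops the leading size-1 axis
assert expand_dims(x, axis=0).shape == (1, 1, 3, 4)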
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(text), expected)
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
    def test_is_control(self):
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
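# --- Added usage sketch (not part of the original test file). It shows the
# greedy longest-match-first behaviour that test_wordpiece_tokenizer above
# asserts, but in isolation with a tiny hand-built vocabulary.
from transformers.models.bert.tokenization_bert import WordpieceTokenizer

demo_vocab = {"un": 0, "##want": 1, "##ed": 2, "runn": 3, "##ing": 4, "[UNK]": 5}
wp = WordpieceTokenizer(vocab=demo_vocab, unk_token="[UNK]")
print(wp.tokenize("unwanted running"))  # ['un', '##want', '##ed', 'runn', '##ing']
print(wp.tokenize("unwantedX"))         # ['[UNK]'] – the whole word falls back to UNK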
| 187 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_biogpt"] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
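# --- Added sketch (not part of the original module). It illustrates what the
# _LazyModule indirection above buys: importing the package is cheap, and the
# torch-backed submodule is only imported on first attribute access. Assumes
# transformers is installed.
import importlib

biogpt = importlib.import_module("transformers.models.biogpt")  # no torch import yet
config_cls = biogpt.BioGptConfig  # this attribute access triggers the real import
print(config_cls.__name__)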
| 208 | 0 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")

        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")

        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
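# --- Added sketch (not from the original test): the replicate/shard pattern in
# miniature. `shard` reshapes a batch so axis 0 enumerates devices, and one PRNG
# key is derived per device before the pmapped call. The array below is a
# stand-in for real prompt ids.
import jax
import numpy as np

num_devices = jax.device_count()
rng = jax.random.PRNGKey(0)
per_device_rngs = jax.random.split(rng, num_devices)       # shape (num_devices, 2)

batch = np.zeros((num_devices * 2, 77), dtype=np.int32)    # fake prompt ids
sharded = batch.reshape(num_devices, -1, batch.shape[-1])  # what shard() does
assert sharded.shape[0] == num_devices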
| 295 |
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    return two_pound(x)
if __name__ == "__main__":
print(solution(int(input().strip())))
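# --- Added alternative (not part of the original solution). The recursive chain
# above recomputes overlapping subproblems; an equivalent bottom-up dynamic
# program counts the same coin combinations in O(target * len(coins)).
def solution_dp(target: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    ways = [1] + [0] * target
    for coin in coins:
        for amount in range(coin, target + 1):
            ways[amount] += ways[amount - coin]
    return ways[target]


assert solution_dp(200) == 73682  # the well-known Project Euler 31 answer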
| 295 | 1 |
"""simple docstring"""
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Encode a lowercase string as A1Z26 integers (a=1 ... z=26)."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Decode a list of A1Z26 integers back into a lowercase string."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
if __name__ == "__main__":
main()
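# --- Added round-trip check (not part of the original script): decode inverts
# encode for any lowercase ASCII input.
assert encode("hello") == [8, 5, 12, 12, 15]
assert decode([8, 5, 12, 12, 15]) == "hello"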
| 46 |
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """Min-heap of Node objects with an index map, so decrease_key is O(log n)."""

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("""Min Heap - before decrease key""")
for i in my_min_heap.heap:
print(i)
print("""Min Heap - After decrease key of node [B -> -17]""")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
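# --- Added comparison (not part of the original module). The standard library's
# heapq gives the same min-heap push/pop semantics without decrease-key; the
# index map maintained by MinHeap above is exactly what heapq lacks.
import heapq

h = [4, 1, 3]
heapq.heapify(h)
heapq.heappush(h, 0)
assert heapq.heappop(h) == 0  # smallest element comes out first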
| 13 | 0 |
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
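# --- Added note (not part of the original script). requests encodes the params
# dict into the query string, so the call above is equivalent to fetching a URL
# built like this (values URL-encoded):
from urllib.parse import urlencode

base = "https://scholar.google.com/scholar_lookup"
print(f"{base}?{urlencode({'title': 'example title', 'year': 2018})}")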
| 328 |
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
logger = logging.get_logger(__name__)
DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version() -> None:
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes=None,
    font_path=None,
) -> "Image.Image":
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image: np.ndarray, header: str, **kwargs):
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    r"""
    Constructs a Pix2Struct image processor that turns images into flattened patch sequences.
    """

    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa

    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t. the resized image yields at most max_patches patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result

    def normalize(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")

            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
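# --- Added numeric check (not part of the original processor). The rescaling
# logic above solves for a scale s such that the resized image yields at most
# max_patches patches; this verifies the identity for one assumed image size.
import math

patch_h = patch_w = 16
image_h, image_w = 480, 640
max_patches_demo = 2048
scale = math.sqrt(max_patches_demo * (patch_h / image_h) * (patch_w / image_w))
rows = max(min(math.floor(scale * image_h / patch_h), max_patches_demo), 1)
cols = max(min(math.floor(scale * image_w / patch_w), max_patches_demo), 1)
assert rows * cols <= max_patches_demo  # e.g. 39 * 52 = 2028 here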
| 328 | 1 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'
_DESCRIPTION = '\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'
_KWARGS_DESCRIPTION = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n'
CHECKPOINT_URLS = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION,_KWARGS_DESCRIPTION )
class BLEURT(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/google-research/bleurt""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/google-research/bleurt"""] , reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] , )
    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            checkpoint_name = "bleurt-base-128"
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
| 26 |
__a :Dict = '0.18.2'
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
T5FilmDecoder,
Transformer2DModel,
UNet1DModel,
UNet2DConditionModel,
UNet2DModel,
UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPM2AncestralDiscreteScheduler,
KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
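# --- Added sketch (not part of the original __init__): the optional-dependency
# guard pattern repeated throughout this file, in miniature. Probe for a
# backend; on failure, fall back to stand-ins that only raise when used.
def _backend_available(name: str) -> bool:
    try:
        __import__(name)
        return True
    except ImportError:
        return False


if _backend_available("torch"):
    print("torch extras enabled")
else:
    print("falling back to dummy objects")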
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImg2ImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImg2ImgPipeline,
IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImg2ImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyV22ControlnetImg2ImgPipeline,
KandinskyV22ControlnetPipeline,
KandinskyV22Img2ImgPipeline,
KandinskyV22InpaintPipeline,
KandinskyV22Pipeline,
KandinskyV22PriorEmb2EmbPipeline,
KandinskyV22PriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImg2ImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImg2ImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepth2ImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImg2ImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPix2PixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDM3DPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPix2PixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImg2ImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor | 312 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
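# --- Added usage sketch (not part of the original pipeline module). The
# checkpoint name is an assumption (a published SDE-VE checkpoint); sampling is
# slow on CPU, so the step count is kept deliberately tiny here.
from diffusers import ScoreSdeVePipeline

pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
image = pipe(num_inference_steps=10).images[0]
image.save("sde_ve_sample.png")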
| 351 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Pick the largest divisor of seq_length below window_size, ONNX-exportably."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
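# --- Added worked example (not part of the original config module). It shows
# what expand_attention_types_params produces for the default spec: the
# [["global", "local"], 12] entry unrolls into 24 alternating layer types.
spec = [[["global", "local"], 12]]
layers = []
for item in spec:
    for _ in range(item[1]):
        layers.extend(item[0])
assert len(layers) == 24 and layers[:4] == ["global", "local", "global", "local"]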
| 52 | 0 |
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
def UpperCamelCase_ ( self ):
super().setUp()
lowercase = [
'<unk>',
'[CLS]',
'[SEP]',
'want',
'unwanted',
'wa',
'un',
'running',
',',
'low',
'l',
]
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def UpperCamelCase_ ( self , **_lowerCamelCase ):
lowercase = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def UpperCamelCase_ ( self , _lowerCamelCase ):
lowercase = '<unk> UNwanted , running'
lowercase = '<unk> unwanted, running'
return input_text, output_text
def UpperCamelCase_ ( self ):
lowercase = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=_lowerCamelCase )
lowercase = tokenizer.tokenize('<unk> UNwanted , running' )
self.assertListEqual(_lowerCamelCase , ['<unk>', 'unwanted', ',', 'running'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [0, 4, 8, 7] )
def UpperCamelCase_ ( self ):
lowercase = TransfoXLTokenizer(lower_case=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
def UpperCamelCase_ ( self ):
lowercase = TransfoXLTokenizer(lower_case=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCamelCase_ ( self ):
lowercase = TransfoXLTokenizer(lower_case=_lowerCamelCase )
lowercase = 'Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'
lowercase = [
'Hello',
'(',
'bracket',
')',
'and',
'side',
'@-@',
'scrolled',
'[',
'and',
']',
'Henry',
'\'s',
'$',
'5',
'@,@',
'000',
'with',
'3',
'@.@',
'34',
'm',
'.',
'What',
'\'s',
'up',
'!',
'?',
]
self.assertListEqual(tokenizer.tokenize(_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(tokenizer.convert_tokens_to_string(_lowerCamelCase ) , _lowerCamelCase )
def UpperCamelCase_ ( self ):
lowercase = self.get_tokenizer()
lowercase = len(_lowerCamelCase )
tokenizer.add_tokens(['new1', 'new2'] )
tokenizer.move_added_token('new1' , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(_lowerCamelCase ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('new1' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , 'new1' )
| 220 |
"""simple docstring"""
UNIT_SYMBOL = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 1_2,
'Pm': 1_5,
'Em': 1_8,
'Zm': 2_1,
'Ym': 2_4,
}
def _SCREAMING_SNAKE_CASE ( __snake_case : float , __snake_case : str , __snake_case : str ):
'''simple docstring'''
lowercase = from_type.lower().strip('s' )
lowercase = to_type.lower().strip('s' )
lowercase = UNIT_SYMBOL.get(__snake_case , __snake_case )
lowercase = UNIT_SYMBOL.get(__snake_case , __snake_case )
if from_sanitized not in METRIC_CONVERSION:
lowercase = (
f'Invalid \'from_type\' value: {from_type!r}.\n'
f'Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}'
)
raise ValueError(__snake_case )
if to_sanitized not in METRIC_CONVERSION:
lowercase = (
f'Invalid \'to_type\' value: {to_type!r}.\n'
f'Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}'
)
raise ValueError(__snake_case )
lowercase = METRIC_CONVERSION[from_sanitized]
lowercase = METRIC_CONVERSION[to_sanitized]
lowercase = 1
if from_exponent > to_exponent:
lowercase = from_exponent - to_exponent
else:
lowercase = -(to_exponent - from_exponent)
return value * pow(10 , __snake_case )
if __name__ == "__main__":
from doctest import testmod
testmod()
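# Worked example of the exponent arithmetic above (values chosen for illustration):
# "kilometer" -> "km" (exponent 3) and "meter" -> "m" (exponent 0), so converting
# 4 km to m scales by 10 ** (3 - 0): 4 * 10**3 = 4000. The reverse direction uses
# the negated exponent: 5 m -> km is 5 * 10**(-3) = 0.005.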
| 220 | 1 |
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [
'DownloadConfig',
'DownloadManager',
'DownloadMode',
'StreamingDownloadManager',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 358 |
from math import isqrt
def snake_case ( snake_case__ :int) -> list[int]:
_A = [True] * max_number
for i in range(2 , isqrt(max_number - 1) + 1):
if is_prime[i]:
for j in range(i**2 , snake_case__ , i):
_A = False
return [i for i in range(2 , snake_case__) if is_prime[i]]
def snake_case ( snake_case__ :int = 10**8) -> int:
_A = calculate_prime_numbers(max_number // 2)
_A = 0
_A = 0
_A = len(snake_case__) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
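# Sanity check of the two-pointer count above, worked by hand: for max_number = 30
# the primes below 15 are [2, 3, 5, 7, 11, 13], and the semiprimes p * q < 30 with
# p <= q are 4, 6, 9, 10, 14, 15, 21, 22, 25, 26 -- ten values in total.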
| 81 | 0 |
from __future__ import annotations
from fractions import Fraction
def snake_case_ ( snake_case , snake_case ) -> bool:
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def snake_case_ ( snake_case ) -> list[str]:
lowercase__: Any = []
lowercase__: List[str] = 11
lowercase__: str = int('1' + '0' * digit_len )
for num in range(snake_case , snake_case ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(snake_case , snake_case ):
solutions.append(f'{num}/{den}' )
den += 1
num += 1
lowercase__: Dict = 10
return solutions
def snake_case_ ( snake_case = 2 ) -> int:
lowercase__: Dict = 1.0
for fraction in fraction_list(snake_case ):
lowercase__: Optional[int] = Fraction(snake_case )
result *= frac.denominator / frac.numerator
return int(snake_case )
if __name__ == "__main__":
print(solution())
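# For reference, the four non-trivial digit-cancelling fractions enumerated above
# are 16/64, 19/95, 26/65 and 49/98; their product reduces to 1/100, so the
# accumulated denominator/numerator ratio is 100 and solution() returns 100.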
| 196 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
__lowerCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(__UpperCamelCase )
class __a ( __UpperCamelCase ):
def __init__( self , **lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
return super().__call__(lowerCAmelCase__ , **lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , **lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
lowercase__: int = {}
if "candidate_labels" in kwargs:
lowercase__: Dict = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
lowercase__: List[Any] = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__="This is a photo of {}." ) -> int:
'''simple docstring'''
lowercase__: Optional[int] = load_image(lowerCAmelCase__ )
lowercase__: Dict = self.image_processor(images=[image] , return_tensors=self.framework )
lowercase__: Tuple = candidate_labels
lowercase__: List[str] = [hypothesis_template.format(lowerCAmelCase__ ) for x in candidate_labels]
lowercase__: Optional[Any] = self.tokenizer(lowerCAmelCase__ , return_tensors=self.framework , padding=lowerCAmelCase__ )
lowercase__: str = [text_inputs]
return inputs
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> int:
'''simple docstring'''
lowercase__: str = model_inputs.pop('candidate_labels' )
lowercase__: List[str] = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , lowerCAmelCase__ ):
lowercase__: Any = text_inputs[0]
else:
# Batching case.
lowercase__: Optional[int] = text_inputs[0][0]
lowercase__: Tuple = self.model(**lowerCAmelCase__ , **lowerCAmelCase__ )
lowercase__: Any = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
lowercase__: Tuple = model_outputs.pop('candidate_labels' )
lowercase__: Dict = model_outputs['logits'][0]
if self.framework == "pt":
lowercase__: Optional[int] = logits.softmax(dim=-1 ).squeeze(-1 )
lowercase__: Dict = probs.tolist()
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase__: Dict = [scores]
elif self.framework == "tf":
lowercase__: Optional[int] = stable_softmax(lowerCAmelCase__ , axis=-1 )
lowercase__: Union[str, Any] = probs.numpy().tolist()
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
lowercase__: List[Any] = [
{'score': score, 'label': candidate_label}
for score, candidate_label in sorted(zip(lowerCAmelCase__ , lowerCAmelCase__ ) , key=lambda x : -x[0] )
]
return result
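# Illustrative usage sketch of this pipeline (the checkpoint and image path are
# assumptions; "zero-shot-image-classification" is the pipeline task name):
#
#   from transformers import pipeline
#
#   classifier = pipeline(
#       "zero-shot-image-classification", model="openai/clip-vit-base-patch32"
#   )
#   classifier(
#       "cat.png",
#       candidate_labels=["cat", "dog"],
#       hypothesis_template="This is a photo of {}.",
#   )
#   # -> [{"score": 0.9..., "label": "cat"}, {"score": 0.0..., "label": "dog"}]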
| 196 | 1 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''',
}
class _A ( SCREAMING_SNAKE_CASE_ ):
lowercase__: List[str] = '''autoformer'''
lowercase__: Dict = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self : Union[str, Any] , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[int] = None , __magic_name__ : str = "student_t" , __magic_name__ : str = "nll" , __magic_name__ : int = 1 , __magic_name__ : List[int] = [1, 2, 3, 4, 5, 6, 7] , __magic_name__ : bool = True , __magic_name__ : int = 0 , __magic_name__ : int = 0 , __magic_name__ : int = 0 , __magic_name__ : int = 0 , __magic_name__ : Optional[List[int]] = None , __magic_name__ : Optional[List[int]] = None , __magic_name__ : int = 64 , __magic_name__ : int = 2 , __magic_name__ : int = 2 , __magic_name__ : int = 2 , __magic_name__ : int = 2 , __magic_name__ : int = 32 , __magic_name__ : int = 32 , __magic_name__ : str = "gelu" , __magic_name__ : float = 0.1 , __magic_name__ : float = 0.1 , __magic_name__ : float = 0.1 , __magic_name__ : float = 0.1 , __magic_name__ : float = 0.1 , __magic_name__ : int = 1_00 , __magic_name__ : float = 0.02 , __magic_name__ : bool = True , __magic_name__ : int=True , __magic_name__ : int = 10 , __magic_name__ : int = 25 , __magic_name__ : int = 3 , **__magic_name__ : int , ) -> Optional[int]:
"""simple docstring"""
__snake_case : Optional[int] = prediction_length
__snake_case : int = context_length if context_length is not None else prediction_length
__snake_case : Any = distribution_output
__snake_case : List[str] = loss
__snake_case : List[Any] = input_size
__snake_case : List[str] = num_time_features
__snake_case : int = lags_sequence
__snake_case : List[str] = scaling
__snake_case : Optional[int] = num_dynamic_real_features
__snake_case : Tuple = num_static_real_features
__snake_case : str = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
__snake_case : List[Any] = cardinality
else:
__snake_case : List[Any] = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
__snake_case : int = embedding_dimension
else:
__snake_case : Optional[Any] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__snake_case : Any = num_parallel_samples
# Transformer architecture configuration
__snake_case : List[str] = input_size * len(self.lags_sequence ) + self._number_of_features
__snake_case : Optional[int] = d_model
__snake_case : List[Any] = encoder_attention_heads
__snake_case : List[Any] = decoder_attention_heads
__snake_case : Any = encoder_ffn_dim
__snake_case : Optional[int] = decoder_ffn_dim
__snake_case : Tuple = encoder_layers
__snake_case : Any = decoder_layers
__snake_case : List[str] = dropout
__snake_case : Optional[int] = attention_dropout
__snake_case : Optional[Any] = activation_dropout
__snake_case : Optional[Any] = encoder_layerdrop
__snake_case : List[Any] = decoder_layerdrop
__snake_case : Optional[Any] = activation_function
__snake_case : Tuple = init_std
__snake_case : Optional[int] = use_cache
# Autoformer
__snake_case : List[str] = label_length
__snake_case : Optional[int] = moving_average
__snake_case : List[str] = autocorrelation_factor
super().__init__(is_encoder_decoder=__a , **__a )
@property
def lowercase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 350 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
@slow
def lowercase__ ( self : List[str] ) -> int:
"""simple docstring"""
__snake_case : List[Any] = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
__snake_case : Tuple = tf.convert_to_tensor(
[[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !
__snake_case : List[str] = model(__magic_name__ )["""last_hidden_state"""]
__snake_case : Any = tf.TensorShape((1, 10, 7_68) )
self.assertEqual(output.shape , __magic_name__ )
# compare the actual values for a slice.
__snake_case : str = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 13 | 0 |
'''simple docstring'''
def lowercase__ ( __lowercase : int , __lowercase : int ) -> int:
"""simple docstring"""
while a != 0:
__UpperCamelCase , __UpperCamelCase = b % a, a
return b
def lowercase__ ( __lowercase : int , __lowercase : int ) -> int:
"""simple docstring"""
if gcd(__lowercase , __lowercase ) != 1:
__UpperCamelCase = F'''mod inverse of {a!r} and {m!r} does not exist'''
raise ValueError(__lowercase )
u1, u2, u3 = 1, 0, a
v1, v2, v3 = 0, 1, m
while v3 != 0:
q = u3 // v3
v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
return u1 % m
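# Worked example of the extended-Euclid iteration above: for a = 3, m = 11 the
# loop terminates with u1 = 4, and 3 * 4 = 12 == 1 (mod 11), so the inverse is 4.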
| 53 |
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
def UpperCamelCase_ ( _UpperCAmelCase : str ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : int = SwinConfig.from_pretrained(
"microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
_UpperCAmelCase : List[Any] = MaskFormerConfig(backbone_config=_UpperCAmelCase )
_UpperCAmelCase : Tuple = "huggingface/label-files"
if "ade20k-full" in model_name:
# this should be ok
_UpperCAmelCase : Dict = 847
_UpperCAmelCase : Any = "maskformer-ade20k-full-id2label.json"
elif "ade" in model_name:
# this should be ok
_UpperCAmelCase : Any = 150
_UpperCAmelCase : Any = "ade20k-id2label.json"
elif "coco-stuff" in model_name:
# this should be ok
_UpperCAmelCase : Tuple = 171
_UpperCAmelCase : Union[str, Any] = "maskformer-coco-stuff-id2label.json"
elif "coco" in model_name:
# TODO
_UpperCAmelCase : Any = 133
_UpperCAmelCase : int = "coco-panoptic-id2label.json"
elif "cityscapes" in model_name:
# this should be ok
_UpperCAmelCase : Optional[int] = 19
_UpperCAmelCase : str = "cityscapes-id2label.json"
elif "vistas" in model_name:
# this should be ok
_UpperCAmelCase : Optional[int] = 65
_UpperCAmelCase : Tuple = "mapillary-vistas-id2label.json"
_UpperCAmelCase : List[Any] = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type="dataset" ) , "r" ) )
_UpperCAmelCase : Tuple = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
return config
def UpperCamelCase_ ( _UpperCAmelCase : Optional[int] ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : Dict = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def UpperCamelCase_ ( _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = dct.pop(_UpperCAmelCase )
_UpperCAmelCase : List[str] = val
def UpperCamelCase_ ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : List[str] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_UpperCAmelCase : Optional[int] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_UpperCAmelCase : Any = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
_UpperCAmelCase : Optional[int] = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase : List[str] = in_proj_weight[:dim, :]
_UpperCAmelCase : Tuple = in_proj_bias[: dim]
_UpperCAmelCase : List[Any] = in_proj_weight[
dim : dim * 2, :
]
_UpperCAmelCase : List[str] = in_proj_bias[
dim : dim * 2
]
_UpperCAmelCase : Optional[Any] = in_proj_weight[
-dim :, :
]
_UpperCAmelCase : Dict = in_proj_bias[-dim :]
# fmt: on
def UpperCamelCase_ ( _UpperCAmelCase : Dict , _UpperCAmelCase : str ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
_UpperCAmelCase : Dict = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
_UpperCAmelCase : Dict = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase : int = in_proj_weight[: hidden_size, :]
_UpperCAmelCase : Union[str, Any] = in_proj_bias[:config.hidden_size]
_UpperCAmelCase : List[str] = in_proj_weight[hidden_size : hidden_size * 2, :]
_UpperCAmelCase : List[str] = in_proj_bias[hidden_size : hidden_size * 2]
_UpperCAmelCase : int = in_proj_weight[-hidden_size :, :]
_UpperCAmelCase : Optional[Any] = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
_UpperCAmelCase : Optional[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
_UpperCAmelCase : Tuple = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase : Any = in_proj_weight[: hidden_size, :]
_UpperCAmelCase : Tuple = in_proj_bias[:config.hidden_size]
_UpperCAmelCase : Dict = in_proj_weight[hidden_size : hidden_size * 2, :]
_UpperCAmelCase : Dict = in_proj_bias[hidden_size : hidden_size * 2]
_UpperCAmelCase : Optional[int] = in_proj_weight[-hidden_size :, :]
_UpperCAmelCase : Union[str, Any] = in_proj_bias[-hidden_size :]
# fmt: on
def UpperCamelCase_ ( ) -> torch.Tensor:
"""simple docstring"""
_UpperCAmelCase : int = "http://images.cocodataset.org/val2017/000000039769.jpg"
_UpperCAmelCase : Any = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw )
return im
@torch.no_grad()
def UpperCamelCase_ ( _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : bool = False ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = get_maskformer_config(_UpperCAmelCase )
# load original state_dict
with open(_UpperCAmelCase , "rb" ) as f:
_UpperCAmelCase : Optional[int] = pickle.load(_UpperCAmelCase )
_UpperCAmelCase : Optional[int] = data["model"]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
_UpperCAmelCase : Any = create_rename_keys(_UpperCAmelCase )
for src, dest in rename_keys:
rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
read_in_swin_q_k_v(_UpperCAmelCase , config.backbone_config )
read_in_decoder_q_k_v(_UpperCAmelCase , _UpperCAmelCase )
# update to torch tensors
for key, value in state_dict.items():
_UpperCAmelCase : Tuple = torch.from_numpy(_UpperCAmelCase )
# load 🤗 model
_UpperCAmelCase : Union[str, Any] = MaskFormerForInstanceSegmentation(_UpperCAmelCase )
model.eval()
for name, param in model.named_parameters():
print(_UpperCAmelCase , param.shape )
_UpperCAmelCase , _UpperCAmelCase : Any = model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(_UpperCAmelCase ) == 0, F"""Unexpected keys: {unexpected_keys}"""
# verify results
_UpperCAmelCase : Optional[int] = prepare_img()
if "vistas" in model_name:
_UpperCAmelCase : int = 65
elif "cityscapes" in model_name:
_UpperCAmelCase : Tuple = 65_535
else:
_UpperCAmelCase : Any = 255
_UpperCAmelCase : Optional[Any] = True if "ade" in model_name else False
_UpperCAmelCase : Optional[int] = MaskFormerImageProcessor(ignore_index=_UpperCAmelCase , reduce_labels=_UpperCAmelCase )
_UpperCAmelCase : Optional[int] = image_processor(_UpperCAmelCase , return_tensors="pt" )
_UpperCAmelCase : List[Any] = model(**_UpperCAmelCase )
print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
_UpperCAmelCase : Tuple = torch.tensor(
[[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCAmelCase , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
model.save_pretrained(_UpperCAmelCase )
image_processor.save_pretrained(_UpperCAmelCase )
if push_to_hub:
print("Pushing model and image processor to the hub..." )
model.push_to_hub(F"""nielsr/{model_name}""" )
image_processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
help=("""Name of the MaskFormer model you'd like to convert""",),
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
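# Illustrative CLI invocation (the script filename and local paths are
# assumptions; the flags match the argparse definition above):
#
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade \
#       --push_to_hub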
| 31 | 0 |
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
lowercase__ : int = logging.get_logger(__name__)
# General docstring
lowercase__ : int = '''PoolFormerConfig'''
# Base docstring
lowercase__ : Tuple = '''sail/poolformer_s12'''
lowercase__ : Tuple = [1, 5_12, 7, 7]
# Image classification docstring
lowercase__ : int = '''sail/poolformer_s12'''
lowercase__ : str = '''tabby, tabby cat'''
lowercase__ : Union[str, Any] = [
'''sail/poolformer_s12''',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def _lowerCAmelCase ( __snake_case : Optional[Any] , __snake_case : float = 0.0 , __snake_case : bool = False ) -> Any:
if drop_prob == 0.0 or not training:
return input
__A : str = 1 - drop_prob
__A : List[str] = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
__A : Any = keep_prob + torch.rand(lowercase__ , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
__A : Union[str, Any] = input.div(lowercase__ ) * random_tensor
return output
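# Behavioral note on the stochastic-depth helper above: in training mode each
# sample in the batch is independently either zeroed (with probability drop_prob)
# or rescaled by 1 / keep_prob, so the expected activation is unchanged. E.g. with
# drop_prob = 0.5, every sample of torch.ones(4, 3, 8, 8) comes back either
# all-zero or all-2.0.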
class SCREAMING_SNAKE_CASE (nn.Module ):
def __init__( self , _UpperCAmelCase = None):
'''simple docstring'''
super().__init__()
__A : int = drop_prob
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
return drop_path(__A , self.drop_prob , self.training)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return "p={}".format(self.drop_prob)
class SCREAMING_SNAKE_CASE (nn.Module ):
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None):
'''simple docstring'''
super().__init__()
__A : List[str] = patch_size if isinstance(__A , collections.abc.Iterable) else (patch_size, patch_size)
__A : Any = stride if isinstance(__A , collections.abc.Iterable) else (stride, stride)
__A : Optional[int] = padding if isinstance(__A , collections.abc.Iterable) else (padding, padding)
__A : Dict = nn.Convad(__A , __A , kernel_size=__A , stride=__A , padding=__A)
__A : Any = norm_layer(__A) if norm_layer else nn.Identity()
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Any = self.projection(__A)
__A : Union[str, Any] = self.norm(__A)
return embeddings
class SCREAMING_SNAKE_CASE (nn.GroupNorm ):
def __init__( self , _UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
super().__init__(1 , __A , **__A)
class SCREAMING_SNAKE_CASE (nn.Module ):
def __init__( self , _UpperCAmelCase):
'''simple docstring'''
super().__init__()
__A : Union[str, Any] = nn.AvgPoolad(__A , stride=1 , padding=pool_size // 2 , count_include_pad=__A)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
return self.pool(__A) - hidden_states
class SCREAMING_SNAKE_CASE (nn.Module ):
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
super().__init__()
__A : str = nn.Convad(__A , __A , 1)
__A : List[str] = nn.Convad(__A , __A , 1)
__A : Tuple = PoolFormerDropPath(__A)
if isinstance(config.hidden_act , __A):
__A : Optional[int] = ACTaFN[config.hidden_act]
else:
__A : Tuple = config.hidden_act
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Tuple = self.conva(__A)
__A : Union[str, Any] = self.act_fn(__A)
__A : Union[str, Any] = self.drop(__A)
__A : Dict = self.conva(__A)
__A : Any = self.drop(__A)
return hidden_states
class SCREAMING_SNAKE_CASE (nn.Module ):
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
super().__init__()
__A : Union[str, Any] = PoolFormerPooling(__A)
__A : List[Any] = PoolFormerOutput(__A , __A , __A , __A)
__A : Union[str, Any] = PoolFormerGroupNorm(__A)
__A : Optional[int] = PoolFormerGroupNorm(__A)
# Useful for training neural nets
__A : Union[str, Any] = PoolFormerDropPath(__A) if drop_path > 0.0 else nn.Identity()
__A : Tuple = config.use_layer_scale
if config.use_layer_scale:
__A : str = nn.Parameter(
config.layer_scale_init_value * torch.ones((__A)) , requires_grad=__A)
__A : List[Any] = nn.Parameter(
config.layer_scale_init_value * torch.ones((__A)) , requires_grad=__A)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
if self.use_layer_scale:
__A : Tuple = self.pooling(self.before_norm(__A))
__A : Union[str, Any] = self.layer_scale_a.unsqueeze(-1).unsqueeze(-1) * pooling_output
# First residual connection
__A : List[str] = hidden_states + self.drop_path(__A)
__A : Any = ()
__A : Optional[Any] = self.output(self.after_norm(__A))
__A : int = self.layer_scale_a.unsqueeze(-1).unsqueeze(-1) * layer_output
# Second residual connection
__A : Tuple = hidden_states + self.drop_path(__A)
__A : Tuple = (output,) + outputs
return outputs
else:
__A : Any = self.drop_path(self.pooling(self.before_norm(__A)))
# First residual connection
__A : Any = pooling_output + hidden_states
__A : Optional[int] = ()
# Second residual connection inside the PoolFormerOutput block
__A : Optional[int] = self.drop_path(self.output(self.after_norm(__A)))
__A : Optional[int] = hidden_states + layer_output
__A : List[Any] = (output,) + outputs
return outputs
class SCREAMING_SNAKE_CASE (nn.Module ):
def __init__( self , _UpperCAmelCase):
'''simple docstring'''
super().__init__()
__A : Optional[Any] = config
# stochastic depth decay rule
__A : Any = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths))]
# patch embeddings
__A : int = []
for i in range(config.num_encoder_blocks):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ))
__A : List[Any] = nn.ModuleList(__A)
# Transformer blocks
__A : Union[str, Any] = []
__A : int = 0
for i in range(config.num_encoder_blocks):
# each block consists of layers
__A : Dict = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i]):
layers.append(
PoolFormerLayer(
__A , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio) , drop_path=dpr[cur + j] , ))
blocks.append(nn.ModuleList(__A))
__A : Tuple = nn.ModuleList(__A)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=True):
'''simple docstring'''
__A : Union[str, Any] = () if output_hidden_states else None
__A : Dict = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block)):
__A : int = layers
# Get patch embeddings from hidden_states
__A : List[str] = embedding_layer(__A)
# Send the embeddings through the blocks
for _, blk in enumerate(__A):
__A : int = blk(__A)
__A : int = layer_outputs[0]
if output_hidden_states:
__A : Dict = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(last_hidden_state=__A , hidden_states=__A)
class SCREAMING_SNAKE_CASE (A__ ):
lowerCAmelCase = PoolFormerConfig
lowerCAmelCase = "poolformer"
lowerCAmelCase = "pixel_values"
lowerCAmelCase = True
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
if isinstance(__A , (nn.Linear, nn.Convad)):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(__A , nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase=False):
'''simple docstring'''
if isinstance(__A , __A):
__A : List[Any] = value
lowercase__ : List[str] = r'''\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'''
lowercase__ : Dict = r'''\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n'''
@add_start_docstrings(
'''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''' , A__ , )
class SCREAMING_SNAKE_CASE (A__ ):
def __init__( self , _UpperCAmelCase):
'''simple docstring'''
super().__init__(__A)
__A : Dict = config
__A : str = PoolFormerEncoder(__A)
# Initialize weights and apply final processing
self.post_init()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(__A)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__A , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , ):
'''simple docstring'''
__A : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__A : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
__A : Optional[Any] = self.encoder(
__A , output_hidden_states=__A , return_dict=__A , )
__A : Optional[Any] = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=__A , hidden_states=encoder_outputs.hidden_states , )
class SCREAMING_SNAKE_CASE (nn.Module ):
def __init__( self , _UpperCAmelCase):
'''simple docstring'''
super().__init__()
__A : str = nn.Linear(config.hidden_size , config.hidden_size)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : List[Any] = self.dense(__A)
return output
@add_start_docstrings(
'''\n PoolFormer Model transformer with an image classification head on top\n ''' , A__ , )
class SCREAMING_SNAKE_CASE (A__ ):
def __init__( self , _UpperCAmelCase):
'''simple docstring'''
super().__init__(__A)
__A : List[str] = config.num_labels
__A : Tuple = PoolFormerModel(__A)
# Final norm
__A : Optional[int] = PoolFormerGroupNorm(config.hidden_sizes[-1])
# Classifier head
__A : Optional[int] = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__A)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , ):
'''simple docstring'''
__A : Any = return_dict if return_dict is not None else self.config.use_return_dict
__A : List[str] = self.poolformer(
__A , output_hidden_states=__A , return_dict=__A , )
__A : Optional[int] = outputs[0]
__A : Dict = self.classifier(self.norm(__A).mean([-2, -1]))
__A : Tuple = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__A : str = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__A : List[Any] = """single_label_classification"""
else:
__A : Union[str, Any] = """multi_label_classification"""
if self.config.problem_type == "regression":
__A : Optional[Any] = MSELoss()
if self.num_labels == 1:
__A : Optional[Any] = loss_fct(logits.squeeze() , labels.squeeze())
else:
__A : Optional[int] = loss_fct(__A , __A)
elif self.config.problem_type == "single_label_classification":
__A : Union[str, Any] = CrossEntropyLoss()
__A : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
__A : Optional[Any] = BCEWithLogitsLoss()
__A : Optional[Any] = loss_fct(__A , __A)
if not return_dict:
__A : List[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__A , logits=__A , hidden_states=outputs.hidden_states)
| 351 |
'''simple docstring'''
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
lowercase__ : List[Any] = HfArgumentParser(InitializationArguments)
lowercase__ : List[Any] = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
lowercase__ : Union[str, Any] = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
lowercase__ : List[str] = {
'''vocab_size''': len(tokenizer),
'''scale_attn_by_inverse_layer_idx''': True,
'''reorder_and_upcast_attn''': True,
}
# Load model config (GPT-2 large in this case)
lowercase__ : Optional[int] = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
lowercase__ : Optional[int] = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 190 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def __a ( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Any = u
for i in range(1, __lowerCamelCase ):
UpperCAmelCase_ : int = temp * (u - i)
return temp
def __a ( ):
UpperCAmelCase_ : str = int(input("enter the numbers of values: " ) )
UpperCAmelCase_ : list[list[float]] = []
for _ in range(__lowerCamelCase ):
y.append([] )
for i in range(__lowerCamelCase ):
for j in range(__lowerCamelCase ):
y[i].append(__lowerCamelCase )
UpperCAmelCase_ : Tuple = 0
print("enter the values of parameters in a list: " )
UpperCAmelCase_ : Union[str, Any] = list(map(__lowerCamelCase, input().split() ) )
print("enter the values of corresponding parameters: " )
for i in range(__lowerCamelCase ):
UpperCAmelCase_ : int = float(input() )
UpperCAmelCase_ : Tuple = int(input("enter the value to interpolate: " ) )
UpperCAmelCase_ : Tuple = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1, __lowerCamelCase ):
for j in range(n - i ):
UpperCAmelCase_ : Union[str, Any] = y[j + 1][i - 1] - y[j][i - 1]
UpperCAmelCase_ : Optional[int] = y[0][0]
for i in range(1, __lowerCamelCase ):
summ += (ucal(__lowerCamelCase, __lowerCamelCase ) * y[0][i]) / math.factorial(__lowerCamelCase )
print(f"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
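# Worked example of the forward-difference scheme above: for x = [0, 1, 2, 3],
# y = [1, 2, 4, 8] and value = 1.5, u = (1.5 - 0) / (1 - 0) = 1.5 and the first
# row of the difference table is 1, 1, 1, 1, so
# summ = 1 + 1.5*1 + (1.5*0.5/2!)*1 + (1.5*0.5*(-0.5)/3!)*1 = 2.8125
# (close to the exact 2**1.5 ~= 2.828).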
| 61 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase_ (A : str , A : List[Any] , A : Any ):
# Initialise PyTorch model
snake_case__ : List[Any] = LxmertConfig.from_json_file(A )
print(F'''Building PyTorch model from configuration: {config}''' )
snake_case__ : List[str] = LxmertForPreTraining(A )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(A , A , A )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , A )
if __name__ == "__main__":
a_ :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
a_ :Optional[int] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 277 | 0 |
def lowerCAmelCase_ ( _lowercase : str) -> int:
assert column_title.isupper()
a__ : Union[str, Any] = 0
a__ : Any = len(_lowercase) - 1
a__ : Dict = 0
while index >= 0:
a__ : Optional[Any] = (ord(column_title[index]) - 64) * pow(26 , power)
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
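# Worked examples of the base-26 accumulation above:
# "AB" -> (ord("B") - 64) * 26**0 + (ord("A") - 64) * 26**1 = 2 + 26 = 28
# "AZ" -> 26 * 26**0 + 1 * 26**1 = 52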
| 370 |
from __future__ import annotations
import math
def lowerCAmelCase_ ( _lowercase : int) -> list[int]:
"""simple docstring"""
if num <= 0:
a__ : Tuple = F'''{num}: Invalid input, please enter a positive integer.'''
raise ValueError(_lowercase)
a__ : List[Any] = [True] * (num + 1)
a__ : List[str] = []
a__ : List[Any] = 2
a__ : Optional[int] = int(math.sqrt(_lowercase))
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(_lowercase)
# Set multiples of start be False
for i in range(start * start , num + 1 , start):
if sieve[i] is True:
a__ : Optional[int] = False
start += 1
for j in range(end + 1 , num + 1):
if sieve[j] is True:
prime.append(_lowercase)
return prime
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
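# Quick check of the sieve above: for num = 20, end = int(sqrt(20)) = 4, the
# multiples of 2 and 3 get crossed out, and the result is
# [2, 3, 5, 7, 11, 13, 17, 19].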
| 266 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def __lowerCamelCase ( __UpperCamelCase ) -> int:
"""simple docstring"""
if not postfix_notation:
return 0
lowerCAmelCase_ : Tuple = {"+", "-", "*", "/"}
lowerCAmelCase_ : list[Any] = []
for token in postfix_notation:
if token in operations:
lowerCAmelCase_ , lowerCAmelCase_ : Dict = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(__UpperCamelCase ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
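# Worked example of the evaluator above: ["2", "1", "+", "3", "*"] evaluates to
# (2 + 1) * 3 = 9, and the division branch truncates toward zero for mixed signs:
# ["-7", "2", "/"] -> -7 // 2 + 1 = -3 rather than floor's -4.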
| 241 |
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase ( self : Optional[int] ):
lowerCAmelCase_ : int = inspect.getfile(accelerate.test_utils )
lowerCAmelCase_ : List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
lowerCAmelCase_ : Dict = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def lowerCamelCase ( self : Optional[int] ):
lowerCAmelCase_ : List[Any] = f'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
lowerCAmelCase_ : Dict = [sys.executable] + distributed_args
execute_subprocess_async(a_ , env=os.environ.copy() )
| 241 | 1 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
__UpperCAmelCase : List[str] = get_tests_dir("fixtures/dummy-config.json")
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Any = 0
def _lowercase ( self ):
"""simple docstring"""
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec('''transformers.models.auto''' ) )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = AutoConfig.from_pretrained('''bert-base-uncased''' )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : str = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = AutoConfig.for_model('''roberta''' )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
UpperCamelCase : str = os.path.join(__SCREAMING_SNAKE_CASE , '''fake-roberta''' )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
with open(os.path.join(__SCREAMING_SNAKE_CASE , '''config.json''' ) , '''w''' ) as f:
f.write(json.dumps({} ) )
UpperCamelCase : Optional[int] = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertEqual(type(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
try:
AutoConfig.register('''custom''' , __SCREAMING_SNAKE_CASE )
# Wrong model type will raise an error
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
AutoConfig.register('''model''' , __SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
AutoConfig.register('''bert''' , __SCREAMING_SNAKE_CASE )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCamelCase : int = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def _lowercase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , '''bert-base is not a local folder and is not a valid model identifier''' ):
UpperCamelCase : Tuple = AutoConfig.from_pretrained('''bert-base''' )
def _lowercase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
UpperCamelCase : str = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE , revision='''aaaaaa''' )
def _lowercase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , '''hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.''' , ):
UpperCamelCase : Optional[Any] = AutoConfig.from_pretrained('''hf-internal-testing/no-config-test-repo''' )
def _lowercase ( self ):
"""simple docstring"""
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
UpperCamelCase : Union[str, Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
UpperCamelCase : Optional[Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__SCREAMING_SNAKE_CASE )
UpperCamelCase : str = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(reloaded_config.__class__.__name__ , '''NewModelConfig''' )
def _lowercase ( self ):
"""simple docstring"""
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : int = "new-model"
try:
AutoConfig.register('''new-model''' , __SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local
UpperCamelCase : str = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' )
self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' )
# If remote code is disabled, we load the local one.
UpperCamelCase : str = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' )
# If remote is enabled, we load from the Hub
UpperCamelCase : Optional[Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
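# --- Illustrative sketch (added; not part of the test file) ---
# The registration pattern the tests above exercise, in minimal form; the
# "toy-model" type name is hypothetical. Kept commented out so importing
# this file does not mutate the global config registry.
#
# from transformers import AutoConfig, PretrainedConfig
#
# class ToyConfig(PretrainedConfig):
#     model_type = "toy-model"
#
# AutoConfig.register("toy-model", ToyConfig)
# assert isinstance(AutoConfig.for_model("toy-model"), ToyConfig)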
| 315 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
"processing_mgp_str": ["MgpstrProcessor"],
"tokenization_mgp_str": ["MgpstrTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
"MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
"MgpstrModel",
"MgpstrPreTrainedModel",
"MgpstrForSceneTextRecognition",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
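# --- Note (added) ---
# With the _LazyModule pattern above, importing the package stays cheap: the
# torch-backed classes listed in _import_structure are materialized only on
# first attribute access, roughly like
# ``getattr(importlib.import_module(".modeling_mgp_str", __package__), name)``.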
| 315 | 1 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class A :
"""simple docstring"""
def __init__( self : int,lowercase_ : List[str],lowercase_ : List[str]=1_3,lowercase_ : Tuple=7,lowercase_ : Any=False,lowercase_ : Optional[int]=True,lowercase_ : str=False,lowercase_ : str=True,lowercase_ : Tuple=3_3,lowercase_ : Any=3_2,lowercase_ : int=5,lowercase_ : Optional[Any]=4,lowercase_ : int=3_7,lowercase_ : Tuple="gelu",lowercase_ : List[Any]=0.1,lowercase_ : Tuple=0.1,lowercase_ : Any=5_1_2,lowercase_ : Any=1_6,lowercase_ : List[str]=2,lowercase_ : Any=0.02,lowercase_ : Union[str, Any]=3,lowercase_ : str=4,lowercase_ : int=None,)-> Optional[int]:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = num_choices
A__ = scope
def snake_case__ ( self : Tuple )-> Tuple:
'''simple docstring'''
A__ = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size],self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
A__ = ids_tensor([self.batch_size],self.num_choices )
A__ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self : Dict )-> Any:
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,pad_token_id=1,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_range=self.initializer_range,)
def snake_case__ ( self : Any,lowercase_ : Tuple,lowercase_ : List[str],lowercase_ : Union[str, Any],lowercase_ : List[str],lowercase_ : Tuple,lowercase_ : str )-> str:
'''simple docstring'''
A__ = EsmModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_,attention_mask=lowercase_ )
A__ = model(lowercase_ )
A__ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape,(self.batch_size, self.hidden_size) )
def snake_case__ ( self : Tuple,lowercase_ : str,lowercase_ : Dict,lowercase_ : Dict,lowercase_ : str,lowercase_ : Union[str, Any],lowercase_ : Any )-> List[Any]:
'''simple docstring'''
A__ = EsmForMaskedLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_,attention_mask=lowercase_,labels=lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self : Tuple,lowercase_ : str,lowercase_ : Dict,lowercase_ : List[str],lowercase_ : Any,lowercase_ : str,lowercase_ : str )-> List[Any]:
'''simple docstring'''
A__ = self.num_labels
A__ = EsmForTokenClassification(config=lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_,attention_mask=lowercase_,labels=lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self : int )-> List[Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class A ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = False
lowerCamelCase = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase = ()
lowerCamelCase = (
{
'feature-extraction': EsmModel,
'fill-mask': EsmForMaskedLM,
'text-classification': EsmForSequenceClassification,
'token-classification': EsmForTokenClassification,
'zero-shot': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase = True
def snake_case__ ( self : str )-> str:
'''simple docstring'''
A__ = EsmModelTester(self )
A__ = ConfigTester(self,config_class=lowercase_,hidden_size=3_7 )
def snake_case__ ( self : Optional[int] )-> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self : Optional[int] )-> Any:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def snake_case__ ( self : Tuple )-> Optional[Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A__ = type
self.model_tester.create_and_check_model(*lowercase_ )
def snake_case__ ( self : Union[str, Any] )-> Tuple:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase_ )
def snake_case__ ( self : List[str] )-> Tuple:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_ )
@slow
def snake_case__ ( self : Tuple )-> List[str]:
'''simple docstring'''
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = EsmModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def snake_case__ ( self : Tuple )-> List[str]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()[0]
A__ = EsmEmbeddings(config=lowercase_ )
A__ = torch.as_tensor([[1_2, 3_1, 1_3, model.padding_idx]] )
A__ = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
A__ = create_position_ids_from_input_ids(lowercase_,model.padding_idx )
self.assertEqual(position_ids.shape,expected_positions.shape )
self.assertTrue(torch.all(torch.eq(lowercase_,lowercase_ ) ) )
def snake_case__ ( self : Optional[Any] )-> List[str]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()[0]
A__ = EsmEmbeddings(config=lowercase_ )
A__ = torch.empty(2,4,3_0 )
A__ = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
A__ = torch.as_tensor([expected_single_positions, expected_single_positions] )
A__ = embeddings.create_position_ids_from_inputs_embeds(lowercase_ )
self.assertEqual(position_ids.shape,expected_positions.shape )
self.assertTrue(torch.all(torch.eq(lowercase_,lowercase_ ) ) )
@unittest.skip('Esm does not support embedding resizing' )
def snake_case__ ( self : Optional[Any] )-> Any:
'''simple docstring'''
pass
@unittest.skip('Esm does not support embedding resizing' )
def snake_case__ ( self : Optional[int] )-> int:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def snake_case__ ( self : Optional[Any] )-> Any:
'''simple docstring'''
pass
@require_torch
class A ( _UpperCAmelCase ):
"""simple docstring"""
@slow
def snake_case__ ( self : str )-> int:
'''simple docstring'''
with torch.no_grad():
A__ = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
A__ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
A__ = model(lowercase_ )[0]
A__ = 3_3
A__ = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape,lowercase_ )
A__ = torch.tensor(
[[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3],lowercase_,atol=1E-4 ) )
@slow
def snake_case__ ( self : int )-> Any:
'''simple docstring'''
with torch.no_grad():
A__ = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
A__ = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
A__ = model(lowercase_ )[0]
# compare the actual values for a slice.
A__ = torch.tensor(
[[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3],lowercase_,atol=1E-4 ) )
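# --- Illustrative sketch (added; not part of the test file) ---
# The position-id convention exercised above, rendered as plain Python for
# readability: non-padding tokens are numbered from padding_idx + 1 onward,
# while padding positions keep padding_idx itself. This is an illustrative
# reimplementation, not the library function.
def _toy_position_ids(input_ids, padding_idx):
    position_ids, position = [], padding_idx
    for token in input_ids:
        if token == padding_idx:
            position_ids.append(padding_idx)
        else:
            position += 1
            position_ids.append(position)
    return position_ids


assert _toy_position_ids([12, 31, 13, 1], padding_idx=1) == [2, 3, 4, 1]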
| 7 |
'''simple docstring'''
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    """Rescale ``data`` into the [0, 1] range (min-max normalization)."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Shift and scale ``data`` to zero mean and unit standard deviation."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / sigma, ndigits) for x in data]
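# --- Illustrative usage (added; a minimal check of the two transforms) ---
# The sample values below are hypothetical.
if __name__ == "__main__":
    sample = [10, 20, 30, 40, 50]
    print(normalization(sample))    # [0.0, 0.25, 0.5, 0.75, 1.0]
    print(standardization(sample))  # zero mean, unit stdev (rounded to 3 digits)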
| 120 | 0 |
import math


def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Return the side length of the first square-spiral layer for which the
    proportion of primes along both diagonals falls below ``ratio``
    (Project Euler problem 58)."""
    primes = 3
    j = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
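# --- Worked check (added) ---
# For ring side length s, the three non-square diagonal corners are
# s^2 - 3(s - 1), s^2 - 2(s - 1), s^2 - (s - 1); the inner loop in
# solution() enumerates exactly those for s = j + 2 (the fourth corner,
# s^2 itself, is a perfect square and never prime). Sanity check for j = 3:
corners = list(range(3 * 3 + 3 + 1, 5 * 5, 3 + 1))
assert corners == [13, 17, 21]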
| 19 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
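# --- Note (added) ---
# PaintByExampleImageEncoder pools the CLIP vision features, passes them
# through the transformer mapper, layer-norms them, and projects to
# ``proj_size``; the learned ``uncond_vector`` supplies the unconditional
# embedding used for classifier-free guidance scaling.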
| 19 | 1 |
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point

    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
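# --- Note (added) ---
# The loop above implements the modified Newton-Raphson iteration
#     x_{n+1} = x_n - m * f(x_n) / f'(x_n)
# where m is ``multiplicity``; for a root of multiplicity m this factor
# restores the quadratic convergence of the plain (m = 1) method.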
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
# Find value of e
print(
'The root of log(y) - 1 = 0 is ',
F"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
'The root of exp(x) - 1 = 0 is',
F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""") | 269 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
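# --- Illustrative usage (added; a minimal sketch, not part of the file) ---
# The formatter above is normally reached through the public
# ``Dataset.with_format("torch")`` API; the tiny dataset is hypothetical.
if __name__ == "__main__":
    from datasets import Dataset

    ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("torch")
    print(ds[0]["x"])  # -> tensor([1., 2.])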
| 307 | 0 |
'''simple docstring'''
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a peaking (bell) biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a low-shelf biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a high-shelf biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
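# --- Illustrative usage (added; a minimal sketch) ---
# Running a short hypothetical sample stream through a 1 kHz low-pass at a
# 48 kHz sample rate; ``IIRFilter.process`` is assumed to filter one sample
# at a time, as in the accompanying ``audio_filters.iir_filter`` module.
if __name__ == "__main__":
    lowpass = make_lowpass(1_000, 48_000)
    print([round(lowpass.process(sample), 6) for sample in (0.0, 1.0, 0.5, -0.5)])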
'''simple docstring'''
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """Map an activation-function name to the corresponding ``torch.nn`` module."""
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f'Unsupported activation function: {act_fn}' ) | 187 | 0 |
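# --- Illustrative usage (added; a minimal check) ---
if __name__ == "__main__":
    assert isinstance(get_activation("silu"), nn.SiLU)
    assert isinstance(get_activation("gelu"), nn.GELU)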
def triangle_number_generator():
    """Yield the triangular numbers 1, 3, 6, 10, ..."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Count the divisors of ``n`` via its prime factorization."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Return the first triangular number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
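# --- Worked check (added) ---
# 28 = 2^2 * 7 has (2 + 1) * (1 + 1) = 6 divisors (1, 2, 4, 7, 14, 28),
# which count_divisors reproduces:
assert count_divisors(28) == 6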
| 295 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 295 | 1 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
logger = logging.getLogger(__name__)


@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
| 363 |
import math


def proth(number: int) -> int:
    """Return the ``number``-th Proth number (3, 5, 9, 13, 17, 25, ...)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue
        print(f"The {number}th Proth number: {value}")
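# --- Worked check (added) ---
# Proth numbers have the form k * 2^n + 1 with odd k < 2^n; the first few
# are 3, 5, 9, 13, 17, 25, 33, which the lookup above reproduces:
assert [proth(n) for n in range(1, 8)] == [3, 5, 9, 13, 17, 25, 33]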
| 250 | 0 |
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count scraped from a Google Scholar lookup page."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 3_0,
"pages": "3979-3990",
"year": 2_0_1_8,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 328 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
    """Configuration class to store the configuration of an XLNet model."""

    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
def __init__( self , SCREAMING_SNAKE_CASE_=32000 , SCREAMING_SNAKE_CASE_=1024 , SCREAMING_SNAKE_CASE_=24 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=4096 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="bi" , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=-1 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_="last" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="tanh" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=2 , **SCREAMING_SNAKE_CASE_ , )-> List[str]:
'''simple docstring'''
__UpperCamelCase = vocab_size
__UpperCamelCase = d_model
__UpperCamelCase = n_layer
__UpperCamelCase = n_head
if d_model % n_head != 0:
raise ValueError(F"'d_model % n_head' ({d_model % n_head}) should be equal to 0" )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
F"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" )
__UpperCamelCase = d_model // n_head
__UpperCamelCase = ff_activation
__UpperCamelCase = d_inner
__UpperCamelCase = untie_r
__UpperCamelCase = attn_type
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = dropout
__UpperCamelCase = mem_len
__UpperCamelCase = reuse_len
__UpperCamelCase = bi_data
__UpperCamelCase = clamp_len
__UpperCamelCase = same_length
__UpperCamelCase = summary_type
__UpperCamelCase = summary_use_proj
__UpperCamelCase = summary_activation
__UpperCamelCase = summary_last_dropout
__UpperCamelCase = start_n_top
__UpperCamelCase = end_n_top
__UpperCamelCase = bos_token_id
__UpperCamelCase = pad_token_id
__UpperCamelCase = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
'''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'''
''' instead.''' , SCREAMING_SNAKE_CASE_ , )
__UpperCamelCase = kwargs['''use_cache''']
__UpperCamelCase = use_mems_eval
__UpperCamelCase = use_mems_train
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@property
    def max_position_embeddings(self):
'''simple docstring'''
logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit." )
return -1
@max_position_embeddings.setter
    def max_position_embeddings(self, value):
'''simple docstring'''
raise NotImplementedError(
F"The model {self.model_type} is one of the few models that has no sequence length limit." )
| 328 | 1 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class __lowerCAmelCase ( __magic_name__ ):
UpperCamelCase__ = 42
@flax_register_to_config
class __lowerCAmelCase ( nn.Module , __magic_name__ , __magic_name__ ):
UpperCamelCase__ = 32
UpperCamelCase__ = 4
UpperCamelCase__ = 4
UpperCamelCase__ = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
UpperCamelCase__ = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
UpperCamelCase__ = False
UpperCamelCase__ = (320, 640, 1280, 1280)
UpperCamelCase__ = 2
UpperCamelCase__ = 8
UpperCamelCase__ = None
UpperCamelCase__ = 1280
UpperCamelCase__ = 0.0
UpperCamelCase__ = False
UpperCamelCase__ = jnp.floataa
UpperCamelCase__ = True
UpperCamelCase__ = 0
UpperCamelCase__ = False
def lowerCamelCase__ ( self :List[str] , __magic_name__ :jax.random.KeyArray ):
'''simple docstring'''
a = (1, self.in_channels, self.sample_size, self.sample_size)
a = jnp.zeros(__magic_name__ , dtype=jnp.floataa )
a = jnp.ones((1,) , dtype=jnp.intaa )
a = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
a , a = jax.random.split(__magic_name__ )
a = {"""params""": params_rng, """dropout""": dropout_rng}
return self.init(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )["params"]
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
a = self.block_out_channels
a = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
"""At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.""" )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
a = self.num_attention_heads or self.attention_head_dim
# input
a = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
a = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
a = FlaxTimestepEmbedding(__magic_name__ , dtype=self.dtype )
a = self.only_cross_attention
if isinstance(__magic_name__ , __magic_name__ ):
a = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__magic_name__ , __magic_name__ ):
a = (num_attention_heads,) * len(self.down_block_types )
# down
a = []
a = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
a = output_channel
a = block_out_channels[i]
a = i == len(__magic_name__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
a = FlaxCrossAttnDownBlockaD(
in_channels=__magic_name__ , out_channels=__magic_name__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
a = FlaxDownBlockaD(
in_channels=__magic_name__ , out_channels=__magic_name__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__magic_name__ )
a = down_blocks
# mid
a = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
a = []
a = list(reversed(__magic_name__ ) )
a = list(reversed(__magic_name__ ) )
a = list(reversed(__magic_name__ ) )
a = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
a = output_channel
a = reversed_block_out_channels[i]
a = reversed_block_out_channels[min(i + 1 , len(__magic_name__ ) - 1 )]
a = i == len(__magic_name__ ) - 1
if up_block_type == "CrossAttnUpBlock2D":
a = FlaxCrossAttnUpBlockaD(
in_channels=__magic_name__ , out_channels=__magic_name__ , prev_output_channel=__magic_name__ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
a = FlaxUpBlockaD(
in_channels=__magic_name__ , out_channels=__magic_name__ , prev_output_channel=__magic_name__ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(__magic_name__ )
a = output_channel
a = up_blocks
# out
a = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
a = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self :List[str] , __magic_name__ :Union[str, Any] , __magic_name__ :Optional[Any] , __magic_name__ :str , __magic_name__ :Tuple=None , __magic_name__ :List[str]=None , __magic_name__ :bool = True , __magic_name__ :bool = False , ):
'''simple docstring'''
if not isinstance(__magic_name__ , jnp.ndarray ):
a = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__magic_name__ , jnp.ndarray ) and len(timesteps.shape ) == 0:
a = timesteps.astype(dtype=jnp.floataa )
a = jnp.expand_dims(__magic_name__ , 0 )
a = self.time_proj(__magic_name__ )
a = self.time_embedding(__magic_name__ )
# 2. pre-process
a = jnp.transpose(__magic_name__ , (0, 2, 3, 1) )
a = self.conv_in(__magic_name__ )
# 3. down
a = (sample,)
for down_block in self.down_blocks:
if isinstance(__magic_name__ , __magic_name__ ):
a , a = down_block(__magic_name__ , __magic_name__ , __magic_name__ , deterministic=not train )
else:
a , a = down_block(__magic_name__ , __magic_name__ , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
a = ()
for down_block_res_sample, down_block_additional_residual in zip(
__magic_name__ , __magic_name__ ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
a = new_down_block_res_samples
# 4. mid
a = self.mid_block(__magic_name__ , __magic_name__ , __magic_name__ , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
a = down_block_res_samples[-(self.layers_per_block + 1) :]
a = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(__magic_name__ , __magic_name__ ):
a = up_block(
__magic_name__ , temb=__magic_name__ , encoder_hidden_states=__magic_name__ , res_hidden_states_tuple=__magic_name__ , deterministic=not train , )
else:
a = up_block(__magic_name__ , temb=__magic_name__ , res_hidden_states_tuple=__magic_name__ , deterministic=not train )
# 6. post-process
a = self.conv_norm_out(__magic_name__ )
a = nn.silu(__magic_name__ )
a = self.conv_out(__magic_name__ )
a = jnp.transpose(__magic_name__ , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=__magic_name__ )
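# --- Note (added) ---
# Data flow in ``__call__`` above: each down block emits residual hidden
# states that are collected in order, optionally summed with ControlNet-style
# additional residuals, and then consumed in reverse by the up blocks as skip
# connections, with the mid block bridging the two halves.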
| 347 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = tempfile.mkdtemp()
a = BlipImageProcessor()
a = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
a = BlipProcessor(__magic_name__ , __magic_name__ )
processor.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self :List[Any] , **__magic_name__ :Union[str, Any] ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).tokenizer
def lowerCamelCase__ ( self :str , **__magic_name__ :List[str] ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).image_processor
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
a = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
a = self.get_image_processor(do_normalize=__magic_name__ , padding_value=1.0 )
a = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__magic_name__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __magic_name__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __magic_name__ )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
a = self.prepare_image_inputs()
a = image_processor(__magic_name__ , return_tensors="""np""" )
a = processor(images=__magic_name__ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
a = """lower newer"""
a = processor(text=__magic_name__ )
a = tokenizer(__magic_name__ , return_token_type_ids=__magic_name__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
a = """lower newer"""
a = self.prepare_image_inputs()
a = processor(text=__magic_name__ , images=__magic_name__ )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(__magic_name__ ):
processor()
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__magic_name__ )
a = tokenizer.batch_decode(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
a = """lower newer"""
a = self.prepare_image_inputs()
a = processor(text=__magic_name__ , images=__magic_name__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
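# --- Illustrative usage (added; a minimal sketch, not part of the tests) ---
# The call pattern the tests above exercise; ``tokenizer``, ``image_processor``
# and ``image`` stand in for concrete objects.
#
# processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
# inputs = processor(text="lower newer", images=image)
# sorted(inputs.keys())  # -> ['attention_mask', 'input_ids', 'pixel_values']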
| 347 | 1 |
from manim import *
class _lowerCamelCase( _a ):
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Union[str, Any] = Rectangle(height=0.5, width=0.5)
_lowercase : List[Any] = Rectangle(height=0.4_6, width=0.4_6).set_stroke(width=0)
_lowercase : Tuple = [mem.copy() for i in range(6)]
_lowercase : Any = [mem.copy() for i in range(6)]
_lowercase : str = VGroup(*lowerCamelCase).arrange(lowerCamelCase, buff=0)
_lowercase : List[str] = VGroup(*lowerCamelCase).arrange(lowerCamelCase, buff=0)
_lowercase : Union[str, Any] = VGroup(lowerCamelCase, lowerCamelCase).arrange(lowerCamelCase, buff=0)
_lowercase : List[Any] = Text('CPU', font_size=24)
_lowercase : Optional[Any] = Group(lowerCamelCase, lowerCamelCase).arrange(lowerCamelCase, buff=0.5, aligned_edge=lowerCamelCase)
cpu.move_to([-2.5, -0.5, 0])
self.add(lowerCamelCase)
_lowercase : Dict = [mem.copy() for i in range(4)]
_lowercase : Union[str, Any] = VGroup(*lowerCamelCase).arrange(lowerCamelCase, buff=0)
_lowercase : int = Text('GPU', font_size=24)
_lowercase : str = Group(lowerCamelCase, lowerCamelCase).arrange(lowerCamelCase, buff=0.5, aligned_edge=lowerCamelCase)
gpu.move_to([-1, -1, 0])
self.add(lowerCamelCase)
_lowercase : str = [mem.copy() for i in range(6)]
_lowercase : Optional[int] = VGroup(*lowerCamelCase).arrange(lowerCamelCase, buff=0)
_lowercase : Union[str, Any] = Text('Model', font_size=24)
_lowercase : Any = Group(lowerCamelCase, lowerCamelCase).arrange(lowerCamelCase, buff=0.5, aligned_edge=lowerCamelCase)
model.move_to([3, -1.0, 0])
self.add(lowerCamelCase)
_lowercase : Any = []
for i, rect in enumerate(lowerCamelCase):
rect.set_stroke(lowerCamelCase)
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_lowercase : Dict = Rectangle(height=0.4_6 / 4, width=0.4_6 / 3).set_stroke(width=0.0).set_fill(lowerCamelCase, opacity=0.7)
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.0_2, direction=lowerCamelCase)
cpu_target.set_x(cpu_target.get_x() + 0.1)
elif i == 3:
cpu_target.next_to(cpu_targs[0], direction=lowerCamelCase, buff=0.0)
else:
cpu_target.next_to(cpu_targs[i - 1], direction=lowerCamelCase, buff=0.0)
self.add(lowerCamelCase)
cpu_targs.append(lowerCamelCase)
_lowercase : Tuple = [mem.copy() for i in range(6)]
_lowercase : Any = VGroup(*lowerCamelCase).arrange(lowerCamelCase, buff=0)
_lowercase : List[str] = Text('Loaded Checkpoint', font_size=24)
_lowercase : int = Group(lowerCamelCase, lowerCamelCase).arrange(lowerCamelCase, aligned_edge=lowerCamelCase, buff=0.4)
checkpoint.move_to([3, 0.5, 0])
_lowercase : List[str] = Square(side_length=2.2)
key.move_to([-5, 2, 0])
_lowercase : Dict = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''', font_size=18, )
key_text.move_to([-5, 2.4, 0])
self.add(lowerCamelCase, lowerCamelCase)
_lowercase : int = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''', font_size=18, )
blue_text.next_to(lowerCamelCase, DOWN * 2.4, aligned_edge=key_text.get_left())
_lowercase : Any = MarkupText(
F'''Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.''', font_size=24, )
step_a.move_to([2, 2, 0])
self.play(Write(lowerCamelCase), Write(lowerCamelCase))
self.play(Write(lowerCamelCase, run_time=1), Create(lowerCamelCase, run_time=1))
_lowercase : Union[str, Any] = []
_lowercase : int = []
for i, rect in enumerate(lowerCamelCase):
_lowercase : Any = fill.copy().set_fill(lowerCamelCase, opacity=0.7)
target.move_to(lowerCamelCase)
first_animations.append(GrowFromCenter(lowerCamelCase, run_time=1))
_lowercase : List[Any] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1])
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5])
second_animations.append(MoveToTarget(lowerCamelCase, run_time=1.5))
self.play(*lowerCamelCase)
self.play(*lowerCamelCase)
self.wait()
| 21 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__lowerCamelCase : Union[str, Any] = pytest.mark.integration
@require_faiss
class A__ ( __snake_case ):
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(A_ ) for x in np.arange(30 ).tolist()]} )
return dset
def __UpperCamelCase( self ):
'''simple docstring'''
import faiss
UpperCamelCase : Dataset = self._create_dummy_dataset()
UpperCamelCase : List[Any] = dset.map(
lambda A_ , A_ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=A_ , keep_in_memory=A_ )
UpperCamelCase : List[str] = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
UpperCamelCase , UpperCamelCase : Tuple = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def __UpperCamelCase( self ):
'''simple docstring'''
import faiss
UpperCamelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
UpperCamelCase , UpperCamelCase : int = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def __UpperCamelCase( self ):
'''simple docstring'''
import faiss
UpperCamelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=A_ ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
UpperCamelCase , UpperCamelCase : List[str] = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(A_ , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def __UpperCamelCase( self ):
'''simple docstring'''
from elasticsearch import Elasticsearch
UpperCamelCase : Dataset = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
UpperCamelCase : List[str] = {"acknowledged": True}
mocked_bulk.return_value([(True, None)] * 30 )
UpperCamelCase : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
UpperCamelCase : Optional[Any] = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=A_ )
UpperCamelCase , UpperCamelCase : List[str] = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class A__ ( __snake_case ):
def __UpperCamelCase( self ):
'''simple docstring'''
import faiss
UpperCamelCase : Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
UpperCamelCase : Any = np.zeros(5 , dtype=np.floataa )
UpperCamelCase : Optional[Any] = 1
UpperCamelCase , UpperCamelCase : Optional[Any] = index.search(A_ )
self.assertRaises(A_ , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
UpperCamelCase : Optional[int] = np.eye(5 , dtype=np.floataa )[::-1]
UpperCamelCase , UpperCamelCase : Tuple = index.search_batch(A_ )
self.assertRaises(A_ , index.search_batch , queries[0] )
UpperCamelCase : Optional[int] = [scores[0] for scores in total_scores]
UpperCamelCase : Tuple = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A_ ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
import faiss
UpperCamelCase : List[str] = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
UpperCamelCase : List[str] = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(A_ ):
UpperCamelCase : List[str] = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def __UpperCamelCase( self ):
'''simple docstring'''
import faiss
UpperCamelCase : Dict = faiss.IndexFlat(5 )
UpperCamelCase : Union[str, Any] = FaissIndex(custom_index=A_ )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def __UpperCamelCase( self ):
'''simple docstring'''
import faiss
UpperCamelCase : str = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=A_ ) as tmp_file:
index.save(tmp_file.name )
UpperCamelCase : int = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
UpperCamelCase : str = np.zeros(5 , dtype=np.floataa )
UpperCamelCase : int = 1
UpperCamelCase , UpperCamelCase : Dict = index.search(A_ )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def A_ ( _lowerCAmelCase ) -> Optional[int]:
import faiss
UpperCamelCase : Union[str, Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
UpperCamelCase : List[Any] = "index.faiss"
UpperCamelCase : List[str] = F"""mock://{index_name}"""
index.save(_lowerCAmelCase , storage_options=mockfs.storage_options )
UpperCamelCase : List[str] = FaissIndex.load(_lowerCAmelCase , storage_options=mockfs.storage_options )
UpperCamelCase : List[str] = np.zeros(5 , dtype=np.floataa )
UpperCamelCase : Optional[int] = 1
UpperCamelCase , UpperCamelCase : List[str] = index.search(_lowerCAmelCase )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class A__ ( __snake_case ):
def __UpperCamelCase( self ):
'''simple docstring'''
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
UpperCamelCase : List[str] = Elasticsearch()
UpperCamelCase : Union[str, Any] = {"acknowledged": True}
UpperCamelCase : Union[str, Any] = ElasticSearchIndex(es_client=A_ )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(["foo", "bar", "foobar"] )
# single query
UpperCamelCase : str = "foo"
UpperCamelCase : Dict = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
UpperCamelCase , UpperCamelCase : Tuple = index.search(A_ )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
UpperCamelCase : Dict = "foo"
UpperCamelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
UpperCamelCase , UpperCamelCase : str = index.search(A_ , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
UpperCamelCase : Dict = ["foo", "bar", "foobar"]
UpperCamelCase : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
UpperCamelCase , UpperCamelCase : Optional[int] = index.search_batch(A_ )
UpperCamelCase : str = [scores[0] for scores in total_scores]
UpperCamelCase : Optional[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A_ ) , 0 )
self.assertListEqual([1, 1, 1] , A_ )
# batched queries with timeout
UpperCamelCase : int = ["foo", "bar", "foobar"]
UpperCamelCase : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
UpperCamelCase , UpperCamelCase : Union[str, Any] = index.search_batch(A_ , request_timeout=30 )
UpperCamelCase : Union[str, Any] = [scores[0] for scores in total_scores]
UpperCamelCase : Dict = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A_ ) , 0 )
self.assertListEqual([1, 1, 1] , A_ )
| 52 | 0 |
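A hedged usage sketch of the Dataset/FAISS API the tests above exercise; the column name and data here are made up for illustration, not taken from the tests.

import numpy as np
from datasets import Dataset

ds = Dataset.from_dict({"label": ["a", "b", "c"], "vecs": np.eye(3, 5, dtype=np.float32).tolist()})
ds.add_faiss_index(column="vecs")  # build an in-memory FAISS index over the column
scores, examples = ds.get_nearest_examples("vecs", np.ones(5, dtype=np.float32), k=1)
print(examples["label"])  # label of the nearest row
ds.drop_index("vecs")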
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input_dataset():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
| 355 |
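Production-style usage of the same machinery the tests cover; this is a minimal sketch and assumes pyspark plus joblibspark are installed, otherwise entering the context manager raises.

from datasets.parallel import parallel_backend
from datasets.utils.py_utils import map_nested

def add_one(i):  # must be picklable for the worker processes
    return i + 1

with parallel_backend("spark"):
    out = map_nested(add_one, {"a": [1, 2], "b": [3, 4]}, num_proc=2)
# out == {"a": [2, 3], "b": [4, 5]}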
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
| 344 | 0 |
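A plain-Transformers sketch of what the tool's encode/forward/decode steps do end to end; the audio tensor below is a silent placeholder you would replace with real samples.

import torch
from transformers import WhisperForConditionalGeneration, WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-base")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-base")

audio = torch.zeros(16_000)  # one second of silence at 16 kHz, stand-in input
inputs = processor(audio.numpy(), sampling_rate=16_000, return_tensors="pt")
generated_ids = model.generate(inputs=inputs.input_features)
text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]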
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {"""vocab_file""": """sentencepiece.bpe.model"""}
__snake_case = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
}
__snake_case = {
"""moussaKam/mbarthez""": 10_24,
"""moussaKam/barthez""": 10_24,
"""moussaKam/barthez-orangesum-title""": 10_24,
}
__snake_case = """▁"""
class __snake_case ( _SCREAMING_SNAKE_CASE ):
__lowerCamelCase : str = VOCAB_FILES_NAMES
__lowerCamelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Optional[Any] = ["""input_ids""", """attention_mask"""]
def __init__( self , snake_case__ , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__ = None , **snake_case__ , ) -> None:
'''simple docstring'''
UpperCAmelCase : List[Any] =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
UpperCAmelCase : Any ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , )
UpperCAmelCase : List[Any] =vocab_file
UpperCAmelCase : List[Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__A ) )
UpperCAmelCase : Union[str, Any] ={'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
UpperCAmelCase : Dict =len(self.sp_model ) - 1
UpperCAmelCase : List[Any] ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase : Tuple =[self.cls_token_id]
UpperCAmelCase : Tuple =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None , snake_case__ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
if token_ids_a is None:
return [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1]
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None ) -> List[int]:
'''simple docstring'''
UpperCAmelCase : Optional[int] =[self.sep_token_id]
UpperCAmelCase : Optional[int] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
return len(self.sp_model )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : Optional[Any] ={self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase__ ( self , snake_case__ ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(__A , out_type=__A )
def UpperCAmelCase__ ( self , snake_case__ ) -> Dict:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase : Any =self.sp_model.PieceToId(__A )
return spm_id if spm_id else self.unk_token_id
def UpperCAmelCase__ ( self , snake_case__ ) -> Optional[Any]:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(__A )
def UpperCAmelCase__ ( self , snake_case__ ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Optional[int] =[]
UpperCAmelCase : Dict =''''''
UpperCAmelCase : str =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
UpperCAmelCase : Optional[int] =True
UpperCAmelCase : Union[str, Any] =[]
else:
current_sub_tokens.append(__A )
UpperCAmelCase : str =False
out_string += self.sp_model.decode(__A )
return out_string.strip()
def __getstate__( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Any =self.__dict__.copy()
UpperCAmelCase : List[str] =None
return state
def __setstate__( self , snake_case__ ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Any =d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCAmelCase : Dict ={}
UpperCAmelCase : List[Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase : List[str] =os.path.join(
__A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __A )
elif not os.path.isfile(self.vocab_file ):
with open(__A , '''wb''' ) as fi:
UpperCAmelCase : Optional[int] =self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
| 348 |
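Hedged usage of the tokenizer above; it needs the sentencepiece package and hub access to fetch the pretrained vocab named in the file.

from transformers import BarthezTokenizer

tok = BarthezTokenizer.from_pretrained("moussaKam/barthez")
enc = tok("Le camembert est délicieux.")
print(tok.convert_ids_to_tokens(enc["input_ids"]))  # wrapped in <s> ... </s>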
"""simple docstring"""
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __A ( _SCREAMING_SNAKE_CASE, unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = BertTokenizer
__lowerCAmelCase = BertTokenizerFast
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = filter_non_english
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
super().setUp()
a =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
a =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def SCREAMING_SNAKE_CASE ( self , __A ) -> Union[str, Any]:
a ='''UNwant\u00E9d,running'''
a ='''unwanted, running'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a =self.tokenizer_class(self.vocab_file )
a =tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(__A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [9, 6, 7, 12, 10, 11] )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
if not self.test_rust_tokenizer:
return
a =self.get_tokenizer()
a =self.get_rust_tokenizer()
a ='''UNwant\u00E9d,running'''
a =tokenizer.tokenize(__A )
a =rust_tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
a =tokenizer.encode(__A , add_special_tokens=__A )
a =rust_tokenizer.encode(__A , add_special_tokens=__A )
self.assertListEqual(__A , __A )
a =self.get_rust_tokenizer()
a =tokenizer.encode(__A )
a =rust_tokenizer.encode(__A )
self.assertListEqual(__A , __A )
# With lower casing
a =self.get_tokenizer(do_lower_case=__A )
a =self.get_rust_tokenizer(do_lower_case=__A )
a ='''UNwant\u00E9d,running'''
a =tokenizer.tokenize(__A )
a =rust_tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
a =tokenizer.encode(__A , add_special_tokens=__A )
a =rust_tokenizer.encode(__A , add_special_tokens=__A )
self.assertListEqual(__A , __A )
a =self.get_rust_tokenizer()
a =tokenizer.encode(__A )
a =rust_tokenizer.encode(__A )
self.assertListEqual(__A , __A )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a =BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a =BasicTokenizer(do_lower_case=__A )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a =BasicTokenizer(do_lower_case=__A , strip_accents=__A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
a =BasicTokenizer(do_lower_case=__A , strip_accents=__A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a =BasicTokenizer(do_lower_case=__A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a =BasicTokenizer(do_lower_case=__A )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def SCREAMING_SNAKE_CASE ( self ) -> int:
a =BasicTokenizer(do_lower_case=__A , strip_accents=__A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a =BasicTokenizer(do_lower_case=__A , strip_accents=__A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def SCREAMING_SNAKE_CASE ( self ) -> str:
a =BasicTokenizer(do_lower_case=__A , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a =BasicTokenizer()
a ='''a\n\'ll !!to?\'d of, can\'t.'''
a =['''a''', '''\'''', '''ll''', '''!''', '''!''', '''to''', '''?''', '''\'''', '''d''', '''of''', ''',''', '''can''', '''\'''', '''t''', '''.''']
self.assertListEqual(tokenizer.tokenize(__A ) , __A )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
a =['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
a ={}
for i, token in enumerate(__A ):
a =i
a =WordpieceTokenizer(vocab=__A , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def SCREAMING_SNAKE_CASE ( self ) -> str:
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a =self.get_tokenizer()
a =self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(__A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a =self.tokenizer_class.from_pretrained('''bert-base-uncased''' )
a =tokenizer.encode('''sequence builders''' , add_special_tokens=__A )
a =tokenizer.encode('''multi-sequence build''' , add_special_tokens=__A )
a =tokenizer.build_inputs_with_special_tokens(__A )
a =tokenizer.build_inputs_with_special_tokens(__A , __A )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def SCREAMING_SNAKE_CASE ( self ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
a =self.rust_tokenizer_class.from_pretrained(__A , **__A )
a =f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
a =tokenizer_r.encode_plus(
__A , return_attention_mask=__A , return_token_type_ids=__A , return_offsets_mapping=__A , add_special_tokens=__A , )
a =tokenizer_r.do_lower_case if hasattr(__A , '''do_lower_case''' ) else False
a =(
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a =['''的''', '''人''', '''有''']
a =''''''.join(__A )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
a =True
a =self.tokenizer_class.from_pretrained(__A , **__A )
a =self.rust_tokenizer_class.from_pretrained(__A , **__A )
a =tokenizer_p.encode(__A , add_special_tokens=__A )
a =tokenizer_r.encode(__A , add_special_tokens=__A )
a =tokenizer_r.convert_ids_to_tokens(__A )
a =tokenizer_p.convert_ids_to_tokens(__A )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
a =False
a =self.rust_tokenizer_class.from_pretrained(__A , **__A )
a =self.tokenizer_class.from_pretrained(__A , **__A )
a =tokenizer_r.encode(__A , add_special_tokens=__A )
a =tokenizer_p.encode(__A , add_special_tokens=__A )
a =tokenizer_r.convert_ids_to_tokens(__A )
a =tokenizer_p.convert_ids_to_tokens(__A )
# it is expected that only the first Chinese character is not preceded by "##".
a =[
f'''##{token}''' if idx != 0 else token for idx, token in enumerate(__A )
]
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A ) | 81 | 0 |
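A quick check, outside the test suite, of the special-token behaviour the slow test above asserts: id 101 is [CLS] and 102 is [SEP] in the bert-base-uncased vocab.

from transformers import BertTokenizer

tok = BertTokenizer.from_pretrained("bert-base-uncased")
ids = tok.encode("sequence builders", add_special_tokens=False)
with_special = tok.build_inputs_with_special_tokens(ids)
assert with_special == [101] + ids + [102]  # [CLS] ... [SEP]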
'''simple docstring'''
def catalan(number: int) -> int:
    """
    Return the `number`-th entry of the Catalan sequence starting at
    catalan(1) == catalan(2) == 1, via the recurrence
    C(i) = C(i - 1) * (4 * i - 2) // (i + 1).
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 354 |
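A quick check of the recurrence above: the values for n = 1..6 are 1, 1, 2, 5, 14, 42, i.e. catalan(n) is the (n-1)-th Catalan number with C_0 = 1.

for n in range(1, 7):
    print(n, catalan(n))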
'''simple docstring'''
import os
def solution():
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
| 229 | 0 |
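The same four-in-a-row product scan on a tiny made-up grid, to make the index arithmetic above concrete (only the horizontal pass is shown):

grid = [[1, 2, 3, 4],
        [5, 6, 7, 8],
        [9, 10, 11, 12],
        [13, 14, 15, 16]]
best = 0
for i in range(4):  # horizontal runs of length 4
    p = grid[i][0] * grid[i][1] * grid[i][2] * grid[i][3]
    best = max(best, p)
print(best)  # 43680 = 13 * 14 * 15 * 16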
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
UpperCAmelCase__ = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 0 |
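End-to-end usage of the classes the lazy module above defers; a hedged sketch — the checkpoint id is a real hub name, and the blank image is a placeholder for a text-line crop.

from PIL import Image
from transformers import TrOCRProcessor, VisionEncoderDecoderModel

processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")

image = Image.new("RGB", (384, 384), "white")  # stand-in for a handwriting image
pixel_values = processor(images=image, return_tensors="pt").pixel_values
ids = model.generate(pixel_values)
print(processor.batch_decode(ids, skip_special_tokens=True)[0])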
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 13 | 0 |
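The pipeline the guarded import above exposes; a minimal sketch (large download, so treat the checkpoint id as the usual hub one rather than a requirement).

from diffusers import UnCLIPPipeline

pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha")
image = pipe("a photo of a red panda").images[0]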
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase : Dict = {
'configuration_blip_2': [
'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Blip2Config',
'Blip2QFormerConfig',
'Blip2VisionConfig',
],
'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : int = [
'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Blip2Model',
'Blip2QFormerModel',
'Blip2PreTrainedModel',
'Blip2ForConditionalGeneration',
'Blip2VisionModel',
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
UpperCAmelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 354 |
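Typical use of the classes registered in the BLIP-2 module above; a sketch rather than something to run casually, since the checkpoint is large.

from PIL import Image
from transformers import Blip2Processor, Blip2ForConditionalGeneration

processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b")

image = Image.new("RGB", (224, 224), "blue")  # placeholder image
inputs = processor(images=image, text="Question: what color is it? Answer:", return_tensors="pt")
out = model.generate(**inputs)
print(processor.batch_decode(out, skip_special_tokens=True)[0].strip())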
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    """
    Prints the first-order and second-order Shannon entropies of `text`
    and their difference.
    """
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[Counter, Counter]:
    """
    Convert text input into two Counters: single-character counts and
    consecutive two-character counts.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main() -> None:
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 331 | 0 |
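The first-order quantity computed above is plain Shannon entropy, H = -sum_c p(c) * log2 p(c); a direct check on a short string:

import math
from collections import Counter

text = "abab"
counts = Counter(text)  # {'a': 2, 'b': 2}
total = sum(counts.values())
h = -sum((n / total) * math.log2(n / total) for n in counts.values())
print(h)  # 1.0 bit per symbol: two equally likely characters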
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
lowercase__ : Optional[Any] = '''CompVis/stable-diffusion-v1-1'''
lowercase__ : Optional[Any] = '''CompVis/stable-diffusion-v1-2'''
lowercase__ : Dict = '''CompVis/stable-diffusion-v1-3'''
lowercase__ : Optional[int] = '''CompVis/stable-diffusion-v1-4'''
class SCREAMING_SNAKE_CASE (a__ ):
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True , ):
'''simple docstring'''
        super().__init__()
__A : List[Any] = StableDiffusionPipeline.from_pretrained(_UpperCAmelCase)
__A : List[Any] = StableDiffusionPipeline.from_pretrained(_UpperCAmelCase)
__A : Optional[int] = StableDiffusionPipeline.from_pretrained(_UpperCAmelCase)
__A : int = StableDiffusionPipeline(
vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , requires_safety_checker=_UpperCAmelCase , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea)
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith('_')}
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase = "auto"):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__A : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.enable_attention_slicing(_UpperCAmelCase)
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = 512 , _UpperCAmelCase = 512 , _UpperCAmelCase = 50 , _UpperCAmelCase = 7.5 , _UpperCAmelCase = None , _UpperCAmelCase = 1 , _UpperCAmelCase = 0.0 , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = "pil" , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = 1 , **_UpperCAmelCase , ):
'''simple docstring'''
return self.pipea(
prompt=_UpperCAmelCase , height=_UpperCAmelCase , width=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase , guidance_scale=_UpperCAmelCase , negative_prompt=_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase , eta=_UpperCAmelCase , generator=_UpperCAmelCase , latents=_UpperCAmelCase , output_type=_UpperCAmelCase , return_dict=_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=_UpperCAmelCase , **_UpperCAmelCase , )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = 512 , _UpperCAmelCase = 512 , _UpperCAmelCase = 50 , _UpperCAmelCase = 7.5 , _UpperCAmelCase = None , _UpperCAmelCase = 1 , _UpperCAmelCase = 0.0 , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = "pil" , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = 1 , **_UpperCAmelCase , ):
'''simple docstring'''
return self.pipea(
prompt=_UpperCAmelCase , height=_UpperCAmelCase , width=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase , guidance_scale=_UpperCAmelCase , negative_prompt=_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase , eta=_UpperCAmelCase , generator=_UpperCAmelCase , latents=_UpperCAmelCase , output_type=_UpperCAmelCase , return_dict=_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=_UpperCAmelCase , **_UpperCAmelCase , )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = 512 , _UpperCAmelCase = 512 , _UpperCAmelCase = 50 , _UpperCAmelCase = 7.5 , _UpperCAmelCase = None , _UpperCAmelCase = 1 , _UpperCAmelCase = 0.0 , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = "pil" , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = 1 , **_UpperCAmelCase , ):
'''simple docstring'''
return self.pipea(
prompt=_UpperCAmelCase , height=_UpperCAmelCase , width=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase , guidance_scale=_UpperCAmelCase , negative_prompt=_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase , eta=_UpperCAmelCase , generator=_UpperCAmelCase , latents=_UpperCAmelCase , output_type=_UpperCAmelCase , return_dict=_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=_UpperCAmelCase , **_UpperCAmelCase , )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = 512 , _UpperCAmelCase = 512 , _UpperCAmelCase = 50 , _UpperCAmelCase = 7.5 , _UpperCAmelCase = None , _UpperCAmelCase = 1 , _UpperCAmelCase = 0.0 , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = "pil" , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = 1 , **_UpperCAmelCase , ):
'''simple docstring'''
return self.pipea(
prompt=_UpperCAmelCase , height=_UpperCAmelCase , width=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase , guidance_scale=_UpperCAmelCase , negative_prompt=_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase , eta=_UpperCAmelCase , generator=_UpperCAmelCase , latents=_UpperCAmelCase , output_type=_UpperCAmelCase , return_dict=_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=_UpperCAmelCase , **_UpperCAmelCase , )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = 512 , _UpperCAmelCase = 512 , _UpperCAmelCase = 50 , _UpperCAmelCase = 7.5 , _UpperCAmelCase = None , _UpperCAmelCase = 1 , _UpperCAmelCase = 0.0 , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = "pil" , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = 1 , **_UpperCAmelCase , ):
'''simple docstring'''
__A : Union[str, Any] = 'cuda' if torch.cuda.is_available() else 'cpu'
self.to(_UpperCAmelCase)
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'`height` and `width` must be divisible by 8 but are {height} and {width}.')
# Get first result from Stable Diffusion Checkpoint v1.1
__A : Union[str, Any] = self.textaimg_sda_a(
prompt=_UpperCAmelCase , height=_UpperCAmelCase , width=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase , guidance_scale=_UpperCAmelCase , negative_prompt=_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase , eta=_UpperCAmelCase , generator=_UpperCAmelCase , latents=_UpperCAmelCase , output_type=_UpperCAmelCase , return_dict=_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=_UpperCAmelCase , **_UpperCAmelCase , )
# Get first result from Stable Diffusion Checkpoint v1.2
__A : Dict = self.textaimg_sda_a(
prompt=_UpperCAmelCase , height=_UpperCAmelCase , width=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase , guidance_scale=_UpperCAmelCase , negative_prompt=_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase , eta=_UpperCAmelCase , generator=_UpperCAmelCase , latents=_UpperCAmelCase , output_type=_UpperCAmelCase , return_dict=_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=_UpperCAmelCase , **_UpperCAmelCase , )
# Get first result from Stable Diffusion Checkpoint v1.3
__A : Union[str, Any] = self.textaimg_sda_a(
prompt=_UpperCAmelCase , height=_UpperCAmelCase , width=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase , guidance_scale=_UpperCAmelCase , negative_prompt=_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase , eta=_UpperCAmelCase , generator=_UpperCAmelCase , latents=_UpperCAmelCase , output_type=_UpperCAmelCase , return_dict=_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=_UpperCAmelCase , **_UpperCAmelCase , )
# Get first result from Stable Diffusion Checkpoint v1.4
__A : Tuple = self.textaimg_sda_a(
prompt=_UpperCAmelCase , height=_UpperCAmelCase , width=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase , guidance_scale=_UpperCAmelCase , negative_prompt=_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase , eta=_UpperCAmelCase , generator=_UpperCAmelCase , latents=_UpperCAmelCase , output_type=_UpperCAmelCase , return_dict=_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=_UpperCAmelCase , **_UpperCAmelCase , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]]) | 190 |
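A hedged sketch of driving a four-checkpoint comparison pipeline like the one above; the custom_pipeline id below is an assumption for illustration, not taken from this file.

import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="stable_diffusion_comparison",  # assumed community-pipeline id
    torch_dtype=torch.float16,
).to("cuda")
images = pipe(prompt="an astronaut riding a horse", num_inference_steps=25).images  # one image per checkpoint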
'''simple docstring'''
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force given in polar form into its x and y components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    """Check if a system is in static equilibrium: the net moment is ~ 0."""
    # summation of moments is zero
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
# Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod() | 190 | 1 |
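Quick numeric check of the helpers above: a 10 N force at 45 degrees resolves into equal components, and two opposite collinear forces balance (assuming the functions above are in scope).

fx, fy = polar_force(10, 45)
print(round(fx, 3), round(fy, 3))  # 7.071 7.071
print(in_static_equilibrium(array([[5, 0], [-5, 0]]), array([[0, 0], [0, 0]])))  # True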
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
lowercase__ = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
lowercase__ = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
lowercase__ = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 365 |
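The MRPC/QQP branch above reports accuracy plus F1; the same numbers computed by hand for a toy case:

import numpy as np
from sklearn.metrics import f1_score

preds = np.array([1, 0, 1, 1])
labels = np.array([1, 0, 0, 1])
print(float((preds == labels).mean()))               # 0.75
print(float(f1_score(y_true=labels, y_pred=preds)))  # 0.8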
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """perceiver"""
def __init__( self : str , lowercase_ : List[str]=256 , lowercase_ : List[str]=1_280 , lowercase_ : str=768 , lowercase_ : Tuple=1 , lowercase_ : str=26 , lowercase_ : List[Any]=8 , lowercase_ : int=8 , lowercase_ : List[str]=None , lowercase_ : Dict=None , lowercase_ : int="kv" , lowercase_ : Union[str, Any]=1 , lowercase_ : List[str]=1 , lowercase_ : Any="gelu" , lowercase_ : Optional[Any]=0.1 , lowercase_ : str=0.02 , lowercase_ : Optional[Any]=1E-12 , lowercase_ : Any=True , lowercase_ : Union[str, Any]=262 , lowercase_ : Union[str, Any]=2_048 , lowercase_ : Optional[int]=56 , lowercase_ : int=[368, 496] , lowercase_ : str=16 , lowercase_ : Optional[int]=1_920 , lowercase_ : Tuple=16 , lowercase_ : int=[1, 16, 224, 224] , **lowercase_ : Union[str, Any] , ) -> List[str]:
super().__init__(**lowercase_ )
UpperCAmelCase : Union[str, Any] = num_latents
UpperCAmelCase : List[Any] = d_latents
UpperCAmelCase : Dict = d_model
UpperCAmelCase : Dict = num_blocks
UpperCAmelCase : Optional[int] = num_self_attends_per_block
UpperCAmelCase : Optional[Any] = num_self_attention_heads
UpperCAmelCase : Optional[Any] = num_cross_attention_heads
UpperCAmelCase : Tuple = qk_channels
UpperCAmelCase : Optional[int] = v_channels
UpperCAmelCase : str = cross_attention_shape_for_attention
UpperCAmelCase : Union[str, Any] = self_attention_widening_factor
UpperCAmelCase : List[Any] = cross_attention_widening_factor
UpperCAmelCase : Optional[Any] = hidden_act
UpperCAmelCase : int = attention_probs_dropout_prob
UpperCAmelCase : Optional[int] = initializer_range
UpperCAmelCase : Dict = layer_norm_eps
UpperCAmelCase : Any = use_query_residual
# masked language modeling attributes
UpperCAmelCase : Any = vocab_size
UpperCAmelCase : List[Any] = max_position_embeddings
# image classification attributes
UpperCAmelCase : str = image_size
# flow attributes
UpperCAmelCase : Any = train_size
# multimodal autoencoding attributes
UpperCAmelCase : Any = num_frames
UpperCAmelCase : List[Any] = audio_samples_per_frame
UpperCAmelCase : Tuple = samples_per_patch
UpperCAmelCase : Union[str, Any] = output_shape
class A_ ( _snake_case ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCAmelCase : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCAmelCase : Tuple = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('inputs', dynamic_axis),
('attention_mask', dynamic_axis),
] )
@property
def UpperCAmelCase_ ( self : str ) -> float:
return 1E-4
def UpperCAmelCase_ ( self : str , lowercase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , lowercase_ : int = 3 , lowercase_ : int = 40 , lowercase_ : int = 40 , ) -> Mapping[str, Any]:
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(lowercase_ , lowercase_ ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase : Tuple = compute_effective_axis_dimension(
lowercase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase : int = preprocessor.num_special_tokens_to_add(lowercase_ )
UpperCAmelCase : Union[str, Any] = compute_effective_axis_dimension(
lowercase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase_ )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase : List[Any] = [' '.join(['a'] ) * seq_length] * batch_size
UpperCAmelCase : Union[str, Any] = dict(preprocessor(lowercase_ , return_tensors=lowercase_ ) )
UpperCAmelCase : Union[str, Any] = inputs.pop('input_ids' )
return inputs
elif isinstance(lowercase_ , lowercase_ ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase : Tuple = compute_effective_axis_dimension(lowercase_ , fixed_dimension=OnnxConfig.default_fixed_batch )
UpperCAmelCase : Any = self._generate_dummy_images(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
UpperCAmelCase : Tuple = dict(preprocessor(images=lowercase_ , return_tensors=lowercase_ ) )
UpperCAmelCase : Dict = inputs.pop('pixel_values' )
return inputs
else:
raise ValueError(
'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
| 280 | 0 |
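A quick instantiation of the config above; the tiny sizes here are illustrative so the model builds fast on CPU, not the defaults in the file.

from transformers import PerceiverConfig, PerceiverModel

config = PerceiverConfig(num_latents=16, d_latents=64, d_model=32,
                         num_blocks=1, num_self_attends_per_block=2)
model = PerceiverModel(config)
print(f"{sum(p.numel() for p in model.parameters()):,} parameters")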
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE (stripping the prefix so load_state_dict matches)
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_path', type=str, required=True)
    parser.add_argument('--config_path', type=str, required=True)
    parser.add_argument('--output_path', type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 105 |
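The script above is driven from the command line; a sketch of the invocation with placeholder paths (the script filename here is illustrative):

# python convert_ldm_uncond.py \
#     --checkpoint_path /path/to/ldm.ckpt \
#     --config_path /path/to/ldm_config.yaml \
#     --output_path ./ldm-pipeline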
"""simple docstring"""
import argparse
import struct
import unittest
class SHA256:
    """Class to contain the entire pipeline for the SHA-256 hashing algorithm."""

    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initialize hash values
        self.hashes = [
            0x6A09E667,
            0xBB67AE85,
            0x3C6EF372,
            0xA54FF53A,
            0x510E527F,
            0x9B05688C,
            0x1F83D9AB,
            0x5BE0CD19,
# Initialize round constants
__A = [
0X42_8a2_f98,
0X71_374_491,
0Xb5_c0f_bcf,
0Xe9_b5d_ba5,
0X39_56c_25b,
0X59_f11_1f1,
0X92_3f8_2a4,
0Xab_1c5_ed5,
0Xd8_07a_a98,
0X12_835_b01,
0X24_318_5be,
0X55_0c7_dc3,
0X72_be5_d74,
0X80_deb_1fe,
0X9b_dc0_6a7,
0Xc1_9bf_174,
0Xe4_9b6_9c1,
0Xef_be4_786,
0X0f_c19_dc6,
0X24_0ca_1cc,
0X2d_e92_c6f,
0X4a_748_4aa,
0X5c_b0a_9dc,
0X76_f98_8da,
0X98_3e5_152,
0Xa8_31c_66d,
0Xb0_032_7c8,
0Xbf_597_fc7,
0Xc6_e00_bf3,
0Xd5_a79_147,
0X06_ca6_351,
0X14_292_967,
0X27_b70_a85,
0X2e_1b2_138,
0X4d_2c6_dfc,
0X53_380_d13,
0X65_0a7_354,
0X76_6a0_abb,
0X81_c2c_92e,
0X92_722_c85,
0Xa2_bfe_8a1,
0Xa8_1a6_64b,
0Xc2_4b8_b70,
0Xc7_6c5_1a3,
0Xd1_92e_819,
0Xd6_990_624,
0Xf4_0e3_585,
0X10_6aa_070,
0X19_a4c_116,
0X1e_376_c08,
0X27_487_74c,
0X34_b0b_cb5,
0X39_1c0_cb3,
0X4e_d8a_a4a,
0X5b_9cc_a4f,
0X68_2e6_ff3,
0X74_8f8_2ee,
0X78_a56_36f,
0X84_c87_814,
0X8c_c70_208,
0X90_bef_ffa,
0Xa4_506_ceb,
0Xbe_f9a_3f7,
0Xc6_717_8f2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self) -> None:
        """Run the compression function over every 64-byte block."""
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit value by the given number of bit positions."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class SHAaaaHashTest(unittest.TestCase):
    """Verify the pure-Python implementation against hashlib."""

    def test_match_hashes(self) -> None:
        import hashlib

        data = bytes("Test String", "utf-8")
        self.assertEqual(SHAaaa(data).hash, hashlib.sha256(data).hexdigest())
def main() -> None:
    """Hash a string or the contents of a file and print the digest."""
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )
    args = parser.parse_args()

    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHAaaa(hash_input).hash)


if __name__ == "__main__":
    main()
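# Quick self-check (illustrative, stdlib only): the pure-Python class above
# should agree with hashlib on arbitrary byte strings.
#
#   import hashlib
#   payload = b"abc"
#   assert SHAaaa(payload).hash == hashlib.sha256(payload).hexdigest()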
| 266 | 0 |
def hamming_distance(string1: str, string2: str) -> int:
    """Count the positions at which two equal-length strings differ."""
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")

    count = 0
    for char_a, char_b in zip(string1, string2):
        if char_a != char_b:
            count += 1
    return count
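# A couple of sanity checks (illustrative additions, not from the original
# module): "karolin" and "kathrin" differ in exactly three positions.
assert hamming_distance("karolin", "kathrin") == 3
assert hamming_distance("python", "python") == 0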
if __name__ == "__main__":
import doctest
doctest.testmod() | 307 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 307 | 1 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)
    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))

            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            config = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            config = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")
    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
| 315 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
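# Illustrative usage sketch (instantiating the metric class directly; loading
# it via `datasets.load_metric("spearmanr")` as in the docstring also works):
#
#   metric = Spearmanr()
#   results = metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
#   print(results["spearmanr"])  # -0.7, as in Example 1 above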
| 315 | 1 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Dict:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int:
'''simple docstring'''
pass
| 193 |
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check the substring divisibility property from Project Euler problem 43."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0-9 pandigital numbers with the required substring divisibility."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
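# Worked check using the example from the Project Euler 43 statement: the
# pandigital number 1406357289 satisfies every property tested above
# (406 % 2, 063 % 3, 635 % 5, 357 % 7, 572 % 11, 728 % 13, 289 % 17 are all 0).
assert is_substring_divisible(tuple(int(digit) for digit in "1406357289"))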
if __name__ == "__main__":
print(F'''{solution() = }''')
| 193 | 1 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class TextaTextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = TextaTextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(TypeError):
            generator(4)
@require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
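# Hedged usage sketch (outside the test harness): the same tiny checkpoint
# used above can be driven directly through the pipeline API.
#
#   generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
#   print(generator("translate English to German: hello", do_sample=False))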
| 19 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator

    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
# Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
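# Note on the gradient accumulation logic above: with e.g. batch_size=64 and
# MAX_GPU_BATCH_SIZE=16, training runs micro-batches of 16 and steps the
# optimizer every 64 // 16 = 4 iterations, preserving the effective batch size.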
| 19 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
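# Hedged usage sketch (the file names below are placeholders):
#
#   from PIL import Image
#   tool = ImageSegmentationTool()
#   mask = tool(image=Image.open("cat.png"), label="cat")
#   mask.save("cat_mask.png")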
| 365 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
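# These cases ride on the SchedulerCommonTest harness; a single test can be
# selected with pytest's -k filter (the file path below is a guess at where
# this module would live in the diffusers tree):
#
#   pytest tests/schedulers/test_scheduler_ddim_parallel.py -k full_loop_no_noise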
| 158 | 0 |
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_projs = []
        self.out_layers = []
    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight"
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias"
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)

                weight = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(weight)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight"
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias"
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)
@staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)
    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
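# Illustrative instantiation (the sizes and cutoffs below are made up; they
# are merely WikiText-103-style values chosen to show the intended shapes):
#
#   softmax = TFAdaptiveSoftmaxMask(
#       vocab_size=267_735, d_embed=1_024, d_proj=1_024,
#       cutoffs=[20_000, 40_000, 200_000], div_val=4,
#   )
#   # hidden: [seq_len, batch, d_proj] floats, target: [seq_len, batch] int ids
#   # log_probs = softmax(hidden, target, return_mean=True, training=True)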
| 26 |
def exchange_sort(numbers: list) -> list:
    """Sort a list in place by repeatedly exchanging out-of-order pairs."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
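# Exchange sort performs O(n^2) comparisons regardless of input order;
# e.g. exchange_sort([3, 1, 2]) returns [1, 2, 3].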
| 187 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__A : Any = logging.get_logger(__name__)
class __snake_case(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Dict[str, int] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)

        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self, image: np.ndarray, size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 364 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
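# PNDM combines pseudo Runge-Kutta warm-up steps with a linear multistep
# method; the fast test above only pins the output shape and a fixed slice
# from a seeded run, while the integration test checks a real checkpoint.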
| 89 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
"MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"MBartForCausalLM",
"MBartForConditionalGeneration",
"MBartForQuestionAnswering",
"MBartForSequenceClassification",
"MBartModel",
"MBartPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
"TFMBartForConditionalGeneration",
"TFMBartModel",
"TFMBartPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
"FlaxMBartForConditionalGeneration",
"FlaxMBartForQuestionAnswering",
"FlaxMBartForSequenceClassification",
"FlaxMBartModel",
"FlaxMBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
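# Usage sketch: with the lazy module wired up above, the public classes import as usual, e.g.
#   from transformers import MBartForConditionalGeneration, MBartTokenizer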
| 51 |
'''simple docstring'''
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
_snake_case = logging.get_logger('transformers.models.speecht5')
_snake_case = {
'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
_snake_case = {
'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
_snake_case = {
'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
_snake_case = {
'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
_snake_case = {
'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
_snake_case = {
'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
_snake_case = {
'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
_snake_case = {
'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
_snake_case = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
_snake_case = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
_snake_case = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
_snake_case = []
_snake_case = [
'encoder.version',
'encoder.layers.*.norm_k.weight',
'encoder.layers.*.norm_k.bias',
'decoder.version',
'decoder.layers.*.norm_k.weight',
'decoder.layers.*.norm_k.bias',
'decoder.pos_emb.pe_k',
'speech_encoder_prenet.embed_positions._float_tensor',
'text_decoder_prenet.embed_positions._float_tensor',
]
_snake_case = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'speech_decoder_prenet.*',
'speech_decoder_postnet.*',
]
_snake_case = IGNORE_KEYS + [
'encoder.proj',
'speech_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
_snake_case = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
def _A ( snake_case , snake_case , snake_case , snake_case , snake_case ) -> Optional[Any]:
for attribute in key.split("." ):
_lowercase : Dict = getattr(snake_case , snake_case )
if weight_type is not None:
_lowercase : Union[str, Any] = getattr(snake_case , snake_case ).shape
else:
_lowercase : int = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
_lowercase : str = value
elif weight_type == "weight_g":
_lowercase : List[str] = value
elif weight_type == "weight_v":
_lowercase : int = value
elif weight_type == "bias":
_lowercase : Union[str, Any] = value
elif weight_type == "running_mean":
_lowercase : Union[str, Any] = value
elif weight_type == "running_var":
_lowercase : Optional[Any] = value
elif weight_type == "num_batches_tracked":
_lowercase : Tuple = value
else:
_lowercase : int = value
logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def _A ( snake_case , snake_case ) -> str:
for key in ignore_keys:
if key.endswith(".*" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
_lowercase , _lowercase : Optional[Any] = key.split(".*." )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def _A ( snake_case , snake_case , snake_case ) -> Tuple:
_lowercase : List[Any] = []
if task == "s2t":
_lowercase : str = hf_model.speechta.encoder.prenet.feature_encoder
_lowercase : List[str] = MAPPING_S2T
_lowercase : Optional[Any] = IGNORE_KEYS_S2T
elif task == "t2s":
_lowercase : List[Any] = None
_lowercase : Tuple = MAPPING_T2S
_lowercase : List[str] = IGNORE_KEYS_T2S
elif task == "s2s":
_lowercase : Union[str, Any] = hf_model.speechta.encoder.prenet.feature_encoder
_lowercase : Optional[Any] = MAPPING_S2S
_lowercase : Tuple = IGNORE_KEYS_S2S
else:
raise ValueError(F'''Unsupported task: {task}''' )
for name, value in fairseq_dict.items():
if should_ignore(snake_case , snake_case ):
logger.info(F'''{name} was ignored''' )
continue
_lowercase : Optional[Any] = False
if "conv_layers" in name:
load_conv_layer(
snake_case , snake_case , snake_case , snake_case , hf_model.config.feat_extract_norm == "group" , )
_lowercase : Optional[int] = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
_lowercase , _lowercase : Optional[int] = key.split(".*." )
if prefix in name and suffix in name:
_lowercase : Dict = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
_lowercase : Optional[Any] = True
if "*" in mapped_key:
_lowercase : int = name.split(snake_case )[0].split("." )[-2]
_lowercase : Union[str, Any] = mapped_key.replace("*" , snake_case )
if "weight_g" in name:
_lowercase : Dict = "weight_g"
elif "weight_v" in name:
_lowercase : Optional[Any] = "weight_v"
elif "bias" in name:
_lowercase : Any = "bias"
elif "weight" in name:
_lowercase : Dict = "weight"
elif "running_mean" in name:
_lowercase : List[Any] = "running_mean"
elif "running_var" in name:
_lowercase : Union[str, Any] = "running_var"
elif "num_batches_tracked" in name:
_lowercase : str = "num_batches_tracked"
else:
_lowercase : str = None
set_recursively(snake_case , snake_case , snake_case , snake_case , snake_case )
continue
if not is_used:
unused_weights.append(snake_case )
logger.warning(F'''Unused weights: {unused_weights}''' )
def _A ( snake_case , snake_case , snake_case , snake_case , snake_case ) -> List[str]:
_lowercase : Optional[int] = full_name.split("conv_layers." )[-1]
_lowercase : Tuple = name.split("." )
_lowercase : Optional[int] = int(items[0] )
_lowercase : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
_lowercase : Union[str, Any] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
_lowercase : int = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
_lowercase : Optional[int] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
_lowercase : Tuple = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(snake_case )
@torch.no_grad()
def _A ( snake_case , snake_case , snake_case , snake_case=None , snake_case=None , snake_case=None , ) -> Optional[Any]:
if config_path is not None:
_lowercase : List[str] = SpeechTaConfig.from_pretrained(snake_case )
else:
_lowercase : int = SpeechTaConfig()
if task == "s2t":
_lowercase : List[Any] = config.max_text_positions
_lowercase : Optional[int] = SpeechTaForSpeechToText(snake_case )
elif task == "t2s":
_lowercase : str = 18_76
_lowercase : str = 6_00
_lowercase : List[Any] = config.max_speech_positions
_lowercase : Optional[int] = SpeechTaForTextToSpeech(snake_case )
elif task == "s2s":
_lowercase : Tuple = 18_76
_lowercase : List[Any] = config.max_speech_positions
_lowercase : str = SpeechTaForSpeechToSpeech(snake_case )
else:
raise ValueError(F'''Unknown task name: {task}''' )
if vocab_path:
_lowercase : Any = SpeechTaTokenizer(snake_case , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
_lowercase : Optional[int] = AddedToken("<mask>" , lstrip=snake_case , rstrip=snake_case )
_lowercase : int = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
_lowercase : Dict = SpeechTaFeatureExtractor()
_lowercase : str = SpeechTaProcessor(tokenizer=snake_case , feature_extractor=snake_case )
processor.save_pretrained(snake_case )
_lowercase : Union[str, Any] = torch.load(snake_case )
recursively_load_weights(fairseq_checkpoint["model"] , snake_case , snake_case )
model.save_pretrained(snake_case )
if repo_id:
print("Pushing to the hub..." )
processor.push_to_hub(snake_case )
model.push_to_hub(snake_case )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
'--task',
default='s2t',
type=str,
help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
_snake_case = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
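# Example invocation (hypothetical local paths; the flags match the parser defined above):
#   python convert_speecht5_checkpoint.py --task t2s \
#       --checkpoint_path ./speecht5_tts.pt --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts_hf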
| 250 | 0 |
"""simple docstring"""
import argparse
import hashlib  # hashlib is only used for the self-test below
import struct
class SHA1Hash:
    """simple docstring"""
    def __init__( self , data ):
        self.data = data
        self.h = [0X67_45_23_01, 0XEF_CD_AB_89, 0X98_BA_DC_FE, 0X10_32_54_76, 0XC3_D2_E1_F0]
    @staticmethod
    def rotate( n , b ):
        return ((n << b) | (n >> (3_2 - b))) & 0XFF_FF_FF_FF
    def padding( self ):
        padding = b'\x80' + b'\x00' * (6_3 - (len(self.data ) + 8) % 6_4)
        padded_data = self.data + padding + struct.pack('>Q' , 8 * len(self.data ) )
        return padded_data
    def split_blocks( self ):
        return [
            self.padded_data[i : i + 6_4] for i in range(0 , len(self.padded_data ) , 6_4 )
        ]
    def expand_block( self , block ):
        w = list(struct.unpack('>16L' , block ) ) + [0] * 6_4
        for i in range(1_6 , 8_0 ):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 1_4] ^ w[i - 1_6]) , 1 )
        return w
    def final_hash( self ):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block )
            a, b, c, d, e = self.h
            for i in range(0 , 8_0 ):
                if 0 <= i < 2_0:
                    f = (b & c) | ((~b) & d)
                    k = 0X5A_82_79_99
                elif 2_0 <= i < 4_0:
                    f = b ^ c ^ d
                    k = 0X6E_D9_EB_A1
                elif 4_0 <= i < 6_0:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0X8F_1B_BC_DC
                elif 6_0 <= i < 8_0:
                    f = b ^ c ^ d
                    k = 0XCA_62_C1_D6
                a, b, c, d, e = (
                    self.rotate(a , 5 ) + f + e + k + expanded_block[i] & 0XFF_FF_FF_FF,
                    a,
                    self.rotate(b , 3_0 ),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0XFF_FF_FF_FF,
                self.h[1] + b & 0XFF_FF_FF_FF,
                self.h[2] + c & 0XFF_FF_FF_FF,
                self.h[3] + d & 0XFF_FF_FF_FF,
                self.h[4] + e & 0XFF_FF_FF_FF,
            )
        return ("{:08x}" * 5).format(*self.h )
def test_sha1_hash() -> None:
    """simple docstring"""
    data = b'Test String'
    assert SHA1Hash(data ).final_hash() == hashlib.sha1(data ).hexdigest()  # noqa: S324
def main() -> None:
    """simple docstring"""
    parser = argparse.ArgumentParser(description='Process some strings or files' )
    parser.add_argument(
        '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
    parser.add_argument('--file' , dest='input_file' , help='Hash contents of a file' )
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , 'rb' ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , 'utf-8' )
    print(SHA1Hash(hash_input ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
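# Quick sanity check against the well-known SHA-1 test vector:
#   SHA1Hash(b'abc').final_hash() == 'a9993e364706816aba3e25717850c26c9cd0d89d'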
| 359 |
"""simple docstring"""
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
lowercase__ : Tuple = datasets.logging.get_logger(__name__)
lowercase__ : List[Any] = """\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
"""
lowercase__ : Tuple = """\
BLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""
lowercase__ : List[Any] = """
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
'scores': List of scores.
Examples:
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> bleurt = datasets.load_metric(\"bleurt\")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results[\"scores\"]])
[1.03, 1.04]
"""
lowercase__ : List[Any] = {
"""bleurt-tiny-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip""",
"""bleurt-tiny-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip""",
"""bleurt-base-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip""",
"""bleurt-base-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip""",
"""bleurt-large-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip""",
"""bleurt-large-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip""",
"""BLEURT-20-D3""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip""",
"""BLEURT-20-D6""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip""",
"""BLEURT-20-D12""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip""",
"""BLEURT-20""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip""",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/google-research/bleurt' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/bleurt'] , reference_urls=['https://github.com/google-research/bleurt', 'https://arxiv.org/abs/2004.04696'] , )
    def _download_and_prepare( self , dl_manager ):
# check that config name specifies a valid BLEURT model
if self.config_name == "default":
logger.warning(
'Using default BLEURT-Base checkpoint for sequence maximum length 128. '
'You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').' )
            self.config_name = 'bleurt-base-128'
if self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
else:
raise KeyError(
F"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}" )
# download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
        self.scorer = score.BleurtScorer(os.path.join(model_path , checkpoint_name ) )
    def _compute( self , predictions , references ):
        scores = self.scorer.score(references=references , candidates=predictions )
        return {"scores": scores}
| 289 | 0 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class __A (snake_case__):
'''simple docstring'''
__lowercase: jnp.ndarray
@flax_register_to_config
class __A (nn.Module , snake_case__ , snake_case__):
'''simple docstring'''
__lowercase: int = 32
__lowercase: int = 4
__lowercase: int = 4
__lowercase: Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
__lowercase: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
__lowercase: Union[bool, Tuple[bool]] = False
__lowercase: Tuple[int] = (3_20, 6_40, 12_80, 12_80)
__lowercase: int = 2
__lowercase: Union[int, Tuple[int]] = 8
__lowercase: Optional[Union[int, Tuple[int]]] = None
__lowercase: int = 12_80
__lowercase: float = 0.0
__lowercase: bool = False
__lowercase: jnp.dtype = jnp.floataa
__lowercase: bool = True
__lowercase: int = 0
__lowercase: bool = False
def lowerCAmelCase ( self : int , UpperCAmelCase_ : jax.random.KeyArray ) ->FrozenDict:
"""simple docstring"""
snake_case_ = (1, self.in_channels, self.sample_size, self.sample_size)
snake_case_ = jnp.zeros(UpperCAmelCase_ , dtype=jnp.floataa )
snake_case_ = jnp.ones((1,) , dtype=jnp.intaa )
snake_case_ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
snake_case_ , snake_case_ = jax.random.split(UpperCAmelCase_ )
snake_case_ = {"""params""": params_rng, """dropout""": dropout_rng}
return self.init(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )["params"]
def lowerCAmelCase ( self : Any ) ->List[Any]:
"""simple docstring"""
snake_case_ = self.block_out_channels
snake_case_ = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
"""At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.""" )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
snake_case_ = self.num_attention_heads or self.attention_head_dim
# input
snake_case_ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
snake_case_ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
snake_case_ = FlaxTimestepEmbedding(UpperCAmelCase_ , dtype=self.dtype )
snake_case_ = self.only_cross_attention
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = (only_cross_attention,) * len(self.down_block_types )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = (num_attention_heads,) * len(self.down_block_types )
# down
snake_case_ = []
snake_case_ = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
snake_case_ = output_channel
snake_case_ = block_out_channels[i]
snake_case_ = i == len(UpperCAmelCase_ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
snake_case_ = FlaxCrossAttnDownBlockaD(
in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
snake_case_ = FlaxDownBlockaD(
in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(UpperCAmelCase_ )
snake_case_ = down_blocks
# mid
snake_case_ = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
snake_case_ = []
snake_case_ = list(reversed(UpperCAmelCase_ ) )
snake_case_ = list(reversed(UpperCAmelCase_ ) )
snake_case_ = list(reversed(UpperCAmelCase_ ) )
snake_case_ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
snake_case_ = output_channel
snake_case_ = reversed_block_out_channels[i]
snake_case_ = reversed_block_out_channels[min(i + 1 , len(UpperCAmelCase_ ) - 1 )]
snake_case_ = i == len(UpperCAmelCase_ ) - 1
if up_block_type == "CrossAttnUpBlock2D":
snake_case_ = FlaxCrossAttnUpBlockaD(
in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , prev_output_channel=UpperCAmelCase_ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
snake_case_ = FlaxUpBlockaD(
in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , prev_output_channel=UpperCAmelCase_ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(UpperCAmelCase_ )
snake_case_ = output_channel
snake_case_ = up_blocks
# out
snake_case_ = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
snake_case_ = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = False , ) ->Union[FlaxUNetaDConditionOutput, Tuple]:
"""simple docstring"""
if not isinstance(UpperCAmelCase_ , jnp.ndarray ):
snake_case_ = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(UpperCAmelCase_ , jnp.ndarray ) and len(timesteps.shape ) == 0:
snake_case_ = timesteps.astype(dtype=jnp.floataa )
snake_case_ = jnp.expand_dims(UpperCAmelCase_ , 0 )
snake_case_ = self.time_proj(UpperCAmelCase_ )
snake_case_ = self.time_embedding(UpperCAmelCase_ )
# 2. pre-process
snake_case_ = jnp.transpose(UpperCAmelCase_ , (0, 2, 3, 1) )
snake_case_ = self.conv_in(UpperCAmelCase_ )
# 3. down
snake_case_ = (sample,)
for down_block in self.down_blocks:
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ , snake_case_ = down_block(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , deterministic=not train )
else:
snake_case_ , snake_case_ = down_block(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
snake_case_ = ()
for down_block_res_sample, down_block_additional_residual in zip(
UpperCAmelCase_ , UpperCAmelCase_ ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
snake_case_ = new_down_block_res_samples
# 4. mid
snake_case_ = self.mid_block(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
snake_case_ = down_block_res_samples[-(self.layers_per_block + 1) :]
snake_case_ = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = up_block(
UpperCAmelCase_ , temb=UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , res_hidden_states_tuple=UpperCAmelCase_ , deterministic=not train , )
else:
snake_case_ = up_block(UpperCAmelCase_ , temb=UpperCAmelCase_ , res_hidden_states_tuple=UpperCAmelCase_ , deterministic=not train )
# 6. post-process
snake_case_ = self.conv_norm_out(UpperCAmelCase_ )
snake_case_ = nn.silu(UpperCAmelCase_ )
snake_case_ = self.conv_out(UpperCAmelCase_ )
snake_case_ = jnp.transpose(UpperCAmelCase_ , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=UpperCAmelCase_ )
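# Note (assumption): in the upstream diffusers library this obfuscated class corresponds to
# FlaxUNet2DConditionModel, and the first method above plays the role of `init_weights`,
# building dummy sample/timestep/encoder-hidden-state inputs to initialize the parameter
# PyTree, roughly `params = unet.init_weights(jax.random.PRNGKey(0))`.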
| 347 |
"""simple docstring"""
import datasets
__SCREAMING_SNAKE_CASE : Tuple = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'
__SCREAMING_SNAKE_CASE : Dict = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
__SCREAMING_SNAKE_CASE : List[str] = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'
def simple_accuracy( preds , labels ):
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __A (datasets.Metric):
'''simple docstring'''
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
    def _compute( self , predictions , references ):
        """simple docstring"""
        return {"accuracy": simple_accuracy(predictions , references )}
| 347 | 1 |
'''simple docstring'''
def solution( limit : int = 2_8_1_2_3 ) -> int:
    sum_divs = [1] * (limit + 1)
    for i in range(2 , int(limit**0.5 ) + 1 ):
        sum_divs[i * i] += i
        for k in range(i + 1 , limit // i + 1 ):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1 , limit + 1 ):
        if sum_divs[n] > n:
            abundants.add(n )
        if not any((n - a in abundants) for a in abundants ):
            res += n
    return res
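# Sanity check (Project Euler problem 23): with the default limit of 28123,
# solution() returns 4179871, the sum of all positive integers that cannot be
# written as the sum of two abundant numbers.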
if __name__ == "__main__":
print(solution())
| 350 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
snake_case__ = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] ) -> Optional[Any]:
A_ : Tuple = state_dict.pop(lowerCamelCase__ )
A_ : Optional[Any] = val
def snake_case__ ( lowerCamelCase__ : Dict ) -> Any:
A_ : int = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
A_ : int = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
A_ : List[str] = value
else:
A_ : Optional[int] = value
return new_state_dict
def snake_case__ ( lowerCamelCase__ : Union[str, Any] ) -> Optional[Any]:
A_ : Any = ''''''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
A_ : Tuple = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
A_ : Dict = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
A_ : str = in_proj_weight[:2_5_6, :]
A_ : Optional[Any] = in_proj_bias[:2_5_6]
A_ : Dict = in_proj_weight[2_5_6:5_1_2, :]
A_ : Tuple = in_proj_bias[2_5_6:5_1_2]
A_ : Tuple = in_proj_weight[-2_5_6:, :]
A_ : Optional[int] = in_proj_bias[-2_5_6:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
A_ : Union[str, Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
A_ : Dict = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
A_ : List[str] = in_proj_weight[:2_5_6, :]
A_ : int = in_proj_bias[:2_5_6]
A_ : Any = in_proj_weight[2_5_6:5_1_2, :]
A_ : List[str] = in_proj_bias[2_5_6:5_1_2]
A_ : Union[str, Any] = in_proj_weight[-2_5_6:, :]
A_ : Optional[Any] = in_proj_bias[-2_5_6:]
# read in weights + bias of input projection layer of cross-attention
A_ : Tuple = state_dict.pop(
f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' )
A_ : Optional[Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
A_ : Dict = in_proj_weight_cross_attn[:2_5_6, :]
A_ : Tuple = in_proj_bias_cross_attn[:2_5_6]
A_ : int = in_proj_weight_cross_attn[2_5_6:5_1_2, :]
A_ : List[str] = in_proj_bias_cross_attn[2_5_6:5_1_2]
A_ : Any = in_proj_weight_cross_attn[-2_5_6:, :]
A_ : Any = in_proj_bias_cross_attn[-2_5_6:]
def snake_case__ ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple ) -> Dict:
A_ ,A_ : int = image.size
A_ : Tuple = max(lowerCamelCase__ , lowerCamelCase__ )
A_ : Optional[Any] = 8_0_0 if '''detection''' in checkpoint_url else 1_0_0_0
A_ : Union[str, Any] = target_max_size / current_max_size
A_ : Any = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def snake_case__ ( lowerCamelCase__ : Tuple ) -> str:
A_ : Any = F.to_tensor(lowerCamelCase__ )
A_ : Optional[Any] = F.normalize(lowerCamelCase__ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : int ) -> str:
logger.info('''Converting model...''' )
# load original state dict
A_ : Tuple = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location='''cpu''' )
# rename keys
for src, dest in rename_keys:
rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
A_ : str = rename_backbone_keys(lowerCamelCase__ )
# query, key and value matrices need special treatment
read_in_q_k_v(lowerCamelCase__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
A_ : List[Any] = '''model.'''
for key in state_dict.copy().keys():
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
A_ : List[Any] = state_dict.pop(lowerCamelCase__ )
A_ : str = val
# create HuggingFace model and load state dict
A_ : Union[str, Any] = TableTransformerConfig(
backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
A_ : Dict = 1_5
A_ : Dict = 2
A_ : int = {0: '''table''', 1: '''table rotated'''}
A_ : List[str] = idalabel
A_ : Optional[int] = {v: k for k, v in idalabel.items()}
else:
A_ : Union[str, Any] = 1_2_5
A_ : Optional[Any] = 6
A_ : Optional[Any] = {
0: '''table''',
1: '''table column''',
2: '''table row''',
3: '''table column header''',
4: '''table projected row header''',
5: '''table spanning cell''',
}
A_ : int = idalabel
A_ : Tuple = {v: k for k, v in idalabel.items()}
A_ : Optional[Any] = DetrImageProcessor(
format='''coco_detection''' , max_size=8_0_0 if '''detection''' in checkpoint_url else 1_0_0_0 )
A_ : int = TableTransformerForObjectDetection(lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
model.eval()
# verify our conversion
A_ : Optional[int] = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
A_ : Union[str, Any] = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=lowerCamelCase__ )
A_ : Tuple = Image.open(lowerCamelCase__ ).convert('''RGB''' )
A_ : int = normalize(resize(lowerCamelCase__ , lowerCamelCase__ ) ).unsqueeze(0 )
A_ : str = model(lowerCamelCase__ )
if "detection" in checkpoint_url:
A_ : str = (1, 1_5, 3)
A_ : int = torch.tensor(
[[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
A_ : Tuple = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
A_ : Optional[int] = (1, 1_2_5, 7)
A_ : Dict = torch.tensor(
[[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
A_ : Any = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
image_processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
# Push model to HF hub
logger.info('''Pushing model to the hub...''' )
A_ : List[Any] = (
'''microsoft/table-transformer-detection'''
if '''detection''' in checkpoint_url
else '''microsoft/table-transformer-structure-recognition'''
)
model.push_to_hub(lowerCamelCase__ )
image_processor.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
snake_case__ = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
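# Example invocation (hypothetical script name; relies on the default detection checkpoint URL above):
#   python convert_table_transformer_checkpoint.py --pytorch_dump_folder_path ./table-transformer-detection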
| 4 | 0 |
"""simple docstring"""
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray( arr : Sequence[float] , low : int , high : int ) -> tuple[int | None, int | None, float]:
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low , left_high , left_sum = max_subarray(arr , low , mid )
    right_low , right_high , right_sum = max_subarray(arr , mid + 1 , high )
    cross_left , cross_right , cross_sum = max_cross_sum(arr , low , mid , high )
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum
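# Worked example (classic test case): the maximum subarray of
# [-2, 1, -3, 4, -1, 2, 1, -5, 4] is arr[3:7] = [4, -1, 2, 1] with sum 6, so
#   max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4], 0, 8) == (3, 6, 6)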
def max_cross_sum( arr : Sequence[float] , low : int , mid : int , high : int ) -> tuple[int, int, float]:
    left_sum , max_left = float('-inf' ), -1
    right_sum , max_right = float('-inf' ), -1
    summ = 0
    for i in range(mid , low - 1 , -1 ):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1 , high + 1 ):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)
def time_max_subarray( input_size : int ) -> float:
    arr = [randint(1 , input_size ) for _ in range(input_size )]
    start = time.time()
    max_subarray(arr , 0 , input_size - 1 )
    end = time.time()
    return end - start
def plot_runtimes() -> None:  # function name reconstructed; the obfuscated source left it unnamed
    input_sizes = [1_0, 1_0_0, 1_0_0_0, 1_0_0_0_0, 5_0_0_0_0, 1_0_0_0_0_0, 2_0_0_0_0_0, 3_0_0_0_0_0, 4_0_0_0_0_0, 5_0_0_0_0_0]
    runtimes = [time_max_subarray(input_size ) for input_size in input_sizes]
    print('No of Inputs\t\tTime Taken' )
    for input_size, runtime in zip(input_sizes , runtimes ):
        print(input_size , '\t\t' , runtime )
    plt.plot(input_sizes , runtimes )
    plt.xlabel('Number of Inputs' )
    plt.ylabel('Time taken in seconds' )
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 316 |
"""simple docstring"""
def print_max_activities( start : list[int] , finish : list[int] ) -> None:
    n = len(finish )
    print('The following activities are selected:' )
    # The first activity is always selected
    i = 0
    print(i , end=',' )
    # Consider rest of the activities
    for j in range(n ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end=',' )
            i = j
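# With the sample start/finish arrays below, the greedy selection prints:
#   The following activities are selected:
#   0,1,3,4,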
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 316 | 1 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """simple docstring"""
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch """
            """helper utility that will spawn up """
            """multiple distributed processes"""
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""" , type=int , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
    # positional
    parser.add_argument(
        """training_script""" , type=str , help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ) , )
    # rest from the training program
    parser.add_argument("""training_script_args""" , nargs=REMAINDER )
    return parser.parse_args()
def main():
    """simple docstring"""
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
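# Example invocation (hypothetical script and training args; upstream this launcher is known as xla_spawn.py):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased --do_train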
| 365 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
UpperCAmelCase : List[str] = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig ):
    """simple docstring"""
    batch_size: int = 10000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None
class Parquet(datasets.ArrowBasedBuilder ):
    """simple docstring"""
    BUILDER_CONFIG_CLASS = ParquetConfig
    def _info( self ):
        """simple docstring"""
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
        """simple docstring"""
        if not self.config.data_files:
            raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files ):
                    with open(file , """rb""" ) as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f ) )
                    break
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"""files""": files} ) )
        return splits
    def _cast_table( self , pa_table ):
        """simple docstring"""
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.info.features.arrow_schema )
        return pa_table
    def _generate_tables( self , files ):
        """simple docstring"""
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema ) != sorted(self.config.columns ):
                raise ValueError(
                    f'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file , """rb""" ) as f:
                parquet_file = pq.ParquetFile(f )
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
                        pa_table = pa.Table.from_batches([record_batch] )
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f'{file_idx}_{batch_idx}', self._cast_table(pa_table )
                except ValueError as e:
                    logger.error(f'Failed to read file \'{file}\' with error {type(e )}: {e}' )
                    raise
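# A minimal standalone sketch (added for illustration, not part of the original file) of the
# batched pyarrow read pattern that _generate_tables relies on; the file path is a placeholder.
import pyarrow as pa
import pyarrow.parquet as pq

parquet_file = pq.ParquetFile('data/train-00000-of-00001.parquet' )  # hypothetical local file
for batch_idx, record_batch in enumerate(parquet_file.iter_batches(batch_size=10_000 ) ):
    pa_table = pa.Table.from_batches([record_batch] )  # one Arrow Table per RecordBatch
    print(batch_idx , pa_table.num_rows )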
| 331 | 0 |
lowerCamelCase = 8.3144598
def rms_speed_of_molecule( temperature : float , molar_mass : float ) -> float:
'''simple docstring'''
if temperature < 0:
raise Exception('''Temperature cannot be less than 0 K''' )
if molar_mass <= 0:
raise Exception('''Molar mass cannot be less than or equal to 0 kg/mol''' )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 0.028  # molar mass of N2 in kg/mol (the formula expects SI units, not 28 g/mol)
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
| 188 | '''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest( TestCase ):
    '''simple docstring'''

    def _create_example_records( self ):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dataset( self ):
        data = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
        return Dataset.from_dict(data )

    def test_create( self ):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records )
        self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
        for i, r in enumerate(dset ):
            self.assertDictEqual(r , example_records[i] )

    def test_list_dict_equivalent( self ):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records )
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
        self.assertEqual(dset.info , dset_from_dict.info )

    def test_uneven_records( self ):  # checks what happens with missing columns
        uneven_records = [{"""col_1""": 1}, {"""col_2""": """x"""}]
        dset = Dataset.from_list(uneven_records )
        self.assertDictEqual(dset[0] , {"""col_1""": 1} )
        self.assertDictEqual(dset[1] , {"""col_1""": None} )  # NB: first record is used for columns

    def test_variable_list_records( self ):  # checks if the type can be inferred from the second record
        records = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
        dset = Dataset.from_list(records )
        self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )

    def test_create_empty( self ):
        dset = Dataset.from_list([] )
        self.assertEqual(len(dset ) , 0 )
        self.assertListEqual(dset.column_names , [] )
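# A quick illustration (added for context; not part of the original test file) of the API
# under test -- Dataset.from_list infers the schema from the first record:
from datasets import Dataset

ds = Dataset.from_list([{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}] )
print(ds.column_names )  # ['col_1', 'col_2']
print(ds[0] )            # {'col_1': 3, 'col_2': 'a'}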
| 229 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_bigbird_pegasus': [
'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BigBirdPegasusConfig',
'BigBirdPegasusOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bigbird_pegasus'] = [
'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
'BigBirdPegasusForCausalLM',
'BigBirdPegasusForConditionalGeneration',
'BigBirdPegasusForQuestionAnswering',
'BigBirdPegasusForSequenceClassification',
'BigBirdPegasusModel',
'BigBirdPegasusPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
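# Caller-side behavior of the lazy module above (an illustrative sketch, assuming torch is
# installed so the OptionalDependencyNotAvailable branch is not taken): nothing heavy is
# imported until the attribute is first accessed.
import transformers.models.bigbird_pegasus as bigbird_pegasus

model_cls = bigbird_pegasus.BigBirdPegasusForConditionalGeneration  # triggers the real import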
| 358 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
AutoencoderKL,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests( unittest.TestCase ):
def UpperCAmelCase__ ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ ( self : Dict ):
lowerCamelCase_ : Optional[Any] =1
lowerCamelCase_ : Union[str, Any] =3
lowerCamelCase_ : Dict =(32, 32)
lowerCamelCase_ : List[Any] =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(snake_case__ )
return image
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
torch.manual_seed(0 )
        lowerCamelCase_ : Dict =UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def UpperCAmelCase__ ( self : List[Any] ):
torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
torch.manual_seed(0 )
lowerCamelCase_ : Any =RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(snake_case__ )
@property
def UpperCAmelCase__ ( self : int ):
def extract(*snake_case__ : Dict , **snake_case__ : int ):
class lowercase__ :
def __init__( self : Optional[Any] ):
lowerCamelCase_ : Union[str, Any] =torch.ones([0] )
def UpperCAmelCase__ ( self : List[Any] , snake_case__ : Any ):
self.pixel_values.to(snake_case__ )
return self
return Out()
return extract
def UpperCAmelCase__ ( self : Optional[Any] ):
lowerCamelCase_ : Dict ="cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ : Dict =self.dummy_cond_unet
lowerCamelCase_ : List[str] =PNDMScheduler(skip_prk_steps=snake_case__ )
lowerCamelCase_ : List[Any] =self.dummy_vae
lowerCamelCase_ : Any =self.dummy_text_encoder
lowerCamelCase_ : List[Any] =XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowerCamelCase_ : List[str] =77
lowerCamelCase_ : Optional[int] =self.dummy_image.to(snake_case__ )
lowerCamelCase_ : Any =init_image / 2 + 0.5
        # make sure that the PNDM scheduler skips the PRK steps
        lowerCamelCase_ : List[Any] =AltDiffusionImg2ImgPipeline(
unet=snake_case__ , scheduler=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , safety_checker=snake_case__ , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ : int =VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=snake_case__ )
lowerCamelCase_ : Optional[int] =alt_pipe.to(snake_case__ )
alt_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : Any ="A painting of a squirrel eating a burger"
lowerCamelCase_ : Union[str, Any] =torch.Generator(device=snake_case__ ).manual_seed(0 )
lowerCamelCase_ : str =alt_pipe(
[prompt] , generator=snake_case__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=snake_case__ , )
lowerCamelCase_ : List[Any] =output.images
lowerCamelCase_ : str =torch.Generator(device=snake_case__ ).manual_seed(0 )
lowerCamelCase_ : Optional[Any] =alt_pipe(
[prompt] , generator=snake_case__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=snake_case__ , return_dict=snake_case__ , )[0]
lowerCamelCase_ : Tuple =image[0, -3:, -3:, -1]
lowerCamelCase_ : Union[str, Any] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase_ : List[str] =np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def UpperCAmelCase__ ( self : Tuple ):
lowerCamelCase_ : Dict =self.dummy_cond_unet
lowerCamelCase_ : Any =PNDMScheduler(skip_prk_steps=snake_case__ )
lowerCamelCase_ : Tuple =self.dummy_vae
lowerCamelCase_ : Union[str, Any] =self.dummy_text_encoder
lowerCamelCase_ : Optional[Any] =XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowerCamelCase_ : Tuple =77
lowerCamelCase_ : str =self.dummy_image.to(snake_case__ )
# put models in fp16
lowerCamelCase_ : Optional[Any] =unet.half()
lowerCamelCase_ : Dict =vae.half()
lowerCamelCase_ : str =bert.half()
        # make sure that the PNDM scheduler skips the PRK steps
        lowerCamelCase_ : Optional[int] =AltDiffusionImg2ImgPipeline(
unet=snake_case__ , scheduler=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , safety_checker=snake_case__ , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ : List[Any] =VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=snake_case__ )
lowerCamelCase_ : int =alt_pipe.to(snake_case__ )
alt_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : Tuple ="A painting of a squirrel eating a burger"
lowerCamelCase_ : Tuple =torch.manual_seed(0 )
lowerCamelCase_ : List[Any] =alt_pipe(
[prompt] , generator=snake_case__ , num_inference_steps=2 , output_type="np" , image=snake_case__ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def UpperCAmelCase__ ( self : Union[str, Any] ):
lowerCamelCase_ : int =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCamelCase_ : Any =init_image.resize((760, 504) )
lowerCamelCase_ : List[str] ="BAAI/AltDiffusion"
        lowerCamelCase_ : List[str] =AltDiffusionImg2ImgPipeline.from_pretrained(
snake_case__ , safety_checker=snake_case__ , )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
lowerCamelCase_ : Optional[int] ="A fantasy landscape, trending on artstation"
lowerCamelCase_ : List[Any] =torch.manual_seed(0 )
lowerCamelCase_ : Optional[int] =pipe(
prompt=snake_case__ , image=snake_case__ , strength=0.75 , guidance_scale=7.5 , generator=snake_case__ , output_type="np" , )
lowerCamelCase_ : Optional[int] =output.images[0]
lowerCamelCase_ : Tuple =image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
lowerCamelCase_ : Optional[int] =np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests( unittest.TestCase ):
def UpperCAmelCase__ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : List[Any] ):
lowerCamelCase_ : str =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
lowerCamelCase_ : Any =init_image.resize((768, 512) )
lowerCamelCase_ : Optional[int] =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
lowerCamelCase_ : Dict ="BAAI/AltDiffusion"
        lowerCamelCase_ : List[str] =AltDiffusionImg2ImgPipeline.from_pretrained(
snake_case__ , safety_checker=snake_case__ , )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
lowerCamelCase_ : Optional[Any] ="A fantasy landscape, trending on artstation"
lowerCamelCase_ : Dict =torch.manual_seed(0 )
lowerCamelCase_ : int =pipe(
prompt=snake_case__ , image=snake_case__ , strength=0.75 , guidance_scale=7.5 , generator=snake_case__ , output_type="np" , )
lowerCamelCase_ : int =output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 209 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_electra_fast'''] = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_electra'''] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_electra'''] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_electra'''] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
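# Typical downstream use of the exports above, as a sketch (added for illustration; the
# checkpoint id is a real public one, and the printed shape depends on the input length):
from transformers import ElectraModel, ElectraTokenizerFast

tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator" )
model = ElectraModel.from_pretrained("google/electra-small-discriminator" )
outputs = model(**tokenizer("Hello world" , return_tensors="pt" ) )
print(outputs.last_hidden_state.shape )  # (1, sequence_length, hidden_size)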
| 324 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
'''simple docstring'''
def __init__( self : List[Any] , _A : str , _A : str=13 , _A : Union[str, Any]=7 , _A : Tuple=True , _A : Dict=True , _A : List[str]=True , _A : Optional[int]=True , _A : Dict=99 , _A : Optional[Any]=32 , _A : Optional[int]=5 , _A : str=4 , _A : str=37 , _A : Tuple="gelu" , _A : Any=0.1 , _A : Dict=0.1 , _A : str=512 , _A : Tuple=16 , _A : str=2 , _A : int=0.02 , _A : int=False , _A : List[str]=True , _A : List[Any]="None" , _A : List[str]=3 , _A : Optional[Any]=4 , _A : Dict=None , ) -> Dict:
__magic_name__ : Union[str, Any] = parent
__magic_name__ : Any = batch_size
__magic_name__ : Optional[int] = seq_length
__magic_name__ : List[str] = is_training
__magic_name__ : Optional[Any] = use_input_mask
__magic_name__ : Dict = use_token_type_ids
__magic_name__ : str = use_labels
__magic_name__ : int = vocab_size
__magic_name__ : List[Any] = hidden_size
__magic_name__ : Dict = num_hidden_layers
__magic_name__ : Dict = num_attention_heads
__magic_name__ : Tuple = intermediate_size
__magic_name__ : Any = hidden_act
__magic_name__ : Union[str, Any] = hidden_dropout_prob
__magic_name__ : Union[str, Any] = attention_probs_dropout_prob
__magic_name__ : List[Any] = max_position_embeddings
__magic_name__ : Any = type_vocab_size
__magic_name__ : Union[str, Any] = type_sequence_label_size
__magic_name__ : Union[str, Any] = initializer_range
__magic_name__ : str = num_labels
__magic_name__ : Tuple = num_choices
__magic_name__ : Any = relative_attention
__magic_name__ : str = position_biased_input
__magic_name__ : str = pos_att_type
__magic_name__ : Union[str, Any] = scope
def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
__magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ : List[Any] = None
if self.use_input_mask:
__magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__magic_name__ : int = None
if self.use_token_type_ids:
__magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ : List[str] = None
__magic_name__ : Tuple = None
__magic_name__ : Union[str, Any] = None
if self.use_labels:
__magic_name__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
__magic_name__ : List[Any] = self.get_config()
__magic_name__ : Union[str, Any] = 300
return config
def __lowerCAmelCase ( self : int , _A : Dict ) -> Tuple:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __lowerCAmelCase ( self : Any , _A : Optional[int] , _A : Optional[Any] , _A : Optional[int] , _A : Optional[int] , _A : Any , _A : str , _A : List[Any] ) -> List[Any]:
__magic_name__ : Dict = DebertaModel(config=_A )
model.to(_A )
model.eval()
__magic_name__ : Optional[Any] = model(_A , attention_mask=_A , token_type_ids=_A )[0]
__magic_name__ : Optional[int] = model(_A , token_type_ids=_A )[0]
__magic_name__ : List[str] = model(_A )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __lowerCAmelCase ( self : Any , _A : Union[str, Any] , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Optional[int] ) -> Dict:
__magic_name__ : List[str] = DebertaForMaskedLM(config=_A )
model.to(_A )
model.eval()
__magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self : str , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[int] , _A : str , _A : Union[str, Any] , _A : Any ) -> Union[str, Any]:
__magic_name__ : Optional[int] = self.num_labels
__magic_name__ : Optional[Any] = DebertaForSequenceClassification(_A )
model.to(_A )
model.eval()
__magic_name__ : Any = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(_A )
def __lowerCAmelCase ( self : Tuple , _A : str , _A : str , _A : int , _A : str , _A : int , _A : Optional[int] , _A : List[str] ) -> Optional[int]:
__magic_name__ : str = self.num_labels
__magic_name__ : int = DebertaForTokenClassification(config=_A )
model.to(_A )
model.eval()
__magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self : Optional[Any] , _A : str , _A : Tuple , _A : Optional[int] , _A : Any , _A : Optional[int] , _A : Dict , _A : Union[str, Any] ) -> List[Any]:
__magic_name__ : int = DebertaForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
__magic_name__ : Optional[int] = model(
_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
__magic_name__ : Union[str, Any] = self.prepare_config_and_inputs()
(
(
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) ,
) : int = config_and_inputs
__magic_name__ : Optional[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class DebertaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
A_ : List[Any] = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
A_ : Tuple = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : Union[str, Any] = True
A_ : Any = False
A_ : Dict = False
A_ : str = False
A_ : Dict = False
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
__magic_name__ : List[str] = DebertaModelTester(self )
__magic_name__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 )
def __lowerCAmelCase ( self : List[str] ) -> Tuple:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
__magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
__magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*_A )
def __lowerCAmelCase ( self : Any ) -> str:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*_A )
def __lowerCAmelCase ( self : Any ) -> Tuple:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*_A )
def __lowerCAmelCase ( self : str ) -> List[Any]:
__magic_name__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*_A )
@slow
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : int = DebertaModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason='Model not available yet' )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
pass
@slow
def __lowerCAmelCase ( self : Dict ) -> Tuple:
__magic_name__ : int = DebertaModel.from_pretrained('microsoft/deberta-base' )
__magic_name__ : List[Any] = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
__magic_name__ : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__magic_name__ : Optional[int] = model(_A , attention_mask=_A )[0]
# compare the actual values for a slice.
__magic_name__ : Tuple = torch.tensor(
[[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' ) | 331 | 0 |
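# The integration test above reduces to this inference pattern (a sketch added for
# illustration, using the same checkpoint and input ids; deberta-base has hidden size 768):
import torch
from transformers import DebertaModel

model = DebertaModel.from_pretrained('microsoft/deberta-base' )
input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
attention_mask = torch.ones_like(input_ids )
with torch.no_grad():
    last_hidden_state = model(input_ids , attention_mask=attention_mask )[0]
print(last_hidden_state.shape )  # torch.Size([1, 11, 768])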
import math
def is_prime( number :int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1 (any n = 6k + r with
    # r in {0, 2, 3, 4} is divisible by 2 or 3), so only test those candidates
for i in range(5 , int(math.sqrt(__a ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def solution( nth :int = 1_0_0_0_1 ) -> int:
    """simple docstring"""
    try:
        nth = int(nth )
    except (TypeError, ValueError):
        raise TypeError("""Parameter nth must be int or castable to int.""" ) from None
    if nth <= 0:
        raise ValueError("""Parameter nth must be greater than or equal to one.""" )
    primes = []
    num = 2
    while len(primes ) < nth:
        if is_prime(num ):
            primes.append(num )
            num += 1
        else:
            num += 1
    return primes[len(primes ) - 1]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 276 |
from string import ascii_uppercase
A : List[str] = {str(ord(c) - 5_5): c for c in ascii_uppercase}
def decimal_to_any( num :int , base :int ) -> str:
    """simple docstring"""
    if isinstance(num , float ):
        raise TypeError("""int() can't convert non-string with explicit base""" )
    if num < 0:
        raise ValueError("""parameter must be positive int""" )
    if isinstance(base , str ):
        raise TypeError("""'str' object cannot be interpreted as an integer""" )
    if isinstance(base , float ):
        raise TypeError("""'float' object cannot be interpreted as an integer""" )
    if base in (0, 1):
        raise ValueError("""base must be >= 2""" )
    if base > 3_6:
        raise ValueError("""base must be <= 36""" )
    new_value = """"""
    mod = 0
    div = 0
    while div != 1:
        div , mod = divmod(num , base )
        if base >= 1_1 and 9 < mod < 3_6:
            actual_value = ALPHABET_VALUES[str(mod )]
        else:
            actual_value = str(mod )
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1] )
        elif div == 1:
            new_value += str(num )
            return str(new_value[::-1] )
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 3_7):
for num in range(1_0_0_0):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
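# Worked examples (added for illustration): digits come out least-significant first
# and are reversed at the end.
assert decimal_to_any(255, 16) == "FF"  # 255 = 15 * 16 + 15, and ALPHABET_VALUES['15'] == 'F'
assert decimal_to_any(5, 2) == "101"
assert decimal_to_any(0, 2) == "0"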
| 276 | 1 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("""0.8.3"""):
raise Exception("""requires gluonnlp == 0.8.3""")
if version.parse(mx.__version__) != version.parse("""1.5.0"""):
raise Exception("""requires mxnet == 1.5.0""")
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
SAMPLE_TEXT = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def convert_bort_checkpoint_to_pytorch( bort_checkpoint_path : str , pytorch_dump_folder_path : str ) -> None:
_SCREAMING_SNAKE_CASE = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 1024,
'hidden_size': 768,
'max_length': 512,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 1024,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1e-5,
'token_type_vocab_size': 2,
}
_SCREAMING_SNAKE_CASE = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
_SCREAMING_SNAKE_CASE = BERTEncoder(
attention_cell=predefined_args["""attention_cell"""] , num_layers=predefined_args["""num_layers"""] , units=predefined_args["""units"""] , hidden_size=predefined_args["""hidden_size"""] , max_length=predefined_args["""max_length"""] , num_heads=predefined_args["""num_heads"""] , scaled=predefined_args["""scaled"""] , dropout=predefined_args["""dropout"""] , output_attention=__lowerCamelCase , output_all_encodings=__lowerCamelCase , use_residual=predefined_args["""use_residual"""] , activation=predefined_args.get("""activation""" , """gelu""" ) , layer_norm_eps=predefined_args.get("""layer_norm_eps""" , __lowerCamelCase ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
_SCREAMING_SNAKE_CASE = 'openwebtext_ccnews_stories_books_cased'
# Specify download folder to Gluonnlp's vocab
_SCREAMING_SNAKE_CASE = os.path.join(get_home_dir() , """models""" )
_SCREAMING_SNAKE_CASE = _load_vocab(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , cls=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = nlp.model.BERTModel(
__lowerCamelCase , len(__lowerCamelCase ) , units=predefined_args["""units"""] , embed_size=predefined_args["""embed_size"""] , embed_dropout=predefined_args["""embed_dropout"""] , word_embed=predefined_args["""word_embed"""] , use_pooler=__lowerCamelCase , use_token_type_embed=__lowerCamelCase , token_type_vocab_size=predefined_args["""token_type_vocab_size"""] , use_classifier=__lowerCamelCase , use_decoder=__lowerCamelCase , )
original_bort.load_parameters(__lowerCamelCase , cast_dtype=__lowerCamelCase , ignore_extra=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = original_bort._collect_params_with_prefix()
# Build our config 🤗
_SCREAMING_SNAKE_CASE = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
'vocab_size': len(__lowerCamelCase ),
}
_SCREAMING_SNAKE_CASE = BertConfig.from_dict(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = BertForMaskedLM(__lowerCamelCase )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param , gluon_param ):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param] )
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), F'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'

        return gluon_param
_SCREAMING_SNAKE_CASE = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , """word_embed.0.weight""" )
_SCREAMING_SNAKE_CASE = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , """encoder.position_weight""" )
_SCREAMING_SNAKE_CASE = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , """encoder.layer_norm.beta""" )
_SCREAMING_SNAKE_CASE = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , """encoder.layer_norm.gamma""" )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
_SCREAMING_SNAKE_CASE = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
_SCREAMING_SNAKE_CASE = hf_bort_model.bert.encoder.layer[i]
# self attention
_SCREAMING_SNAKE_CASE = layer.attention.self
_SCREAMING_SNAKE_CASE = check_and_map_params(
self_attn.key.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' )
_SCREAMING_SNAKE_CASE = check_and_map_params(
self_attn.key.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' )
_SCREAMING_SNAKE_CASE = check_and_map_params(
self_attn.query.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' )
_SCREAMING_SNAKE_CASE = check_and_map_params(
self_attn.query.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' )
_SCREAMING_SNAKE_CASE = check_and_map_params(
self_attn.value.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' )
_SCREAMING_SNAKE_CASE = check_and_map_params(
self_attn.value.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' )
# self attention output
_SCREAMING_SNAKE_CASE = layer.attention.output
_SCREAMING_SNAKE_CASE = check_and_map_params(
self_output.dense.bias , F'encoder.transformer_cells.{i}.proj.bias' )
_SCREAMING_SNAKE_CASE = check_and_map_params(
self_output.dense.weight , F'encoder.transformer_cells.{i}.proj.weight' )
_SCREAMING_SNAKE_CASE = check_and_map_params(
self_output.LayerNorm.bias , F'encoder.transformer_cells.{i}.layer_norm.beta' )
_SCREAMING_SNAKE_CASE = check_and_map_params(
self_output.LayerNorm.weight , F'encoder.transformer_cells.{i}.layer_norm.gamma' )
# intermediate
_SCREAMING_SNAKE_CASE = layer.intermediate
_SCREAMING_SNAKE_CASE = check_and_map_params(
intermediate.dense.bias , F'encoder.transformer_cells.{i}.ffn.ffn_1.bias' )
_SCREAMING_SNAKE_CASE = check_and_map_params(
intermediate.dense.weight , F'encoder.transformer_cells.{i}.ffn.ffn_1.weight' )
# output
_SCREAMING_SNAKE_CASE = layer.output
_SCREAMING_SNAKE_CASE = check_and_map_params(
bert_output.dense.bias , F'encoder.transformer_cells.{i}.ffn.ffn_2.bias' )
_SCREAMING_SNAKE_CASE = check_and_map_params(
bert_output.dense.weight , F'encoder.transformer_cells.{i}.ffn.ffn_2.weight' )
_SCREAMING_SNAKE_CASE = check_and_map_params(
bert_output.LayerNorm.bias , F'encoder.transformer_cells.{i}.ffn.layer_norm.beta' )
_SCREAMING_SNAKE_CASE = check_and_map_params(
bert_output.LayerNorm.weight , F'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("""roberta-base""" )
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT )['input_ids']

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids] )
    output_gluon = original_bort(inputs=gluon_input_ids , token_types=[] )

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path )
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path )
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT , return_tensors="""pt""" )
    output_hf = hf_bort_model(**input_ids )[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    success = np.allclose(gluon_layer , hf_layer , atol=1e-3 )

    if success:
        print("""✔️ Both models output the same tensors""" )
    else:
        print("""❌ Both models do **NOT** output the same tensors""" )
        print("""Absolute difference is:""" , max_absolute_diff )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
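# Hypothetical invocation of this script (note added for illustration; both paths are
# placeholders):
#   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path ./bort.params \
#       --pytorch_dump_folder_path ./bort-pytorch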
| 58 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """simple docstring"""

    def __init__( self , components : Collection[float] | None = None ):
        if components is None:
            components = []
        self.__components = list(components )

    def __len__( self ):
        return len(self.__components )

    def __str__( self ):
        return "(" + ",".join(map(str , self.__components ) ) + ")"

    def __add__( self , other ):
        size = len(self )
        if size == len(other ):
            result = [self.__components[i] + other.component(i ) for i in range(size )]
            return Vector(result )
        else:
            raise Exception('must have the same size' )

    def __sub__( self , other ):
        size = len(self )
        if size == len(other ):
            result = [self.__components[i] - other.component(i ) for i in range(size )]
            return Vector(result )
        else:  # error case
            raise Exception('must have the same size' )

    @overload
    def __mul__( self , other : float ) -> Vector:
        ...

    @overload
    def __mul__( self , other : Vector ) -> float:
        ...

    def __mul__( self , other ):
        if isinstance(other , (float, int) ):
            ans = [c * other for c in self.__components]
            return Vector(ans )
        elif isinstance(other , Vector ) and len(self ) == len(other ):
            size = len(self )
            prods = [self.__components[i] * other.component(i ) for i in range(size )]
            return sum(prods )
        else:  # error case
            raise Exception('invalid operand!' )

    def copy( self ):
        return Vector(self.__components )

    def component( self , i ):
        if isinstance(i , int ) and -len(self.__components ) <= i < len(self.__components ):
            return self.__components[i]
        else:
            raise Exception('index out of range' )

    def change_component( self , pos , value ):
        assert -len(self.__components ) <= pos < len(self.__components )
        self.__components[pos] = value

    def euclidean_length( self ):
        if len(self.__components ) == 0:
            raise Exception('Vector is empty' )
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares ) )

    def angle( self , other , deg = False ):
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den ) )
        else:
            return math.acos(num / den )
def zero_vector( dimension : int ) -> Vector:
    assert isinstance(dimension , int )
    return Vector([0] * dimension )


def unit_basis_vector( dimension : int , pos : int ) -> Vector:
    assert isinstance(dimension , int ) and (isinstance(pos , int ))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans )


def axpy( scalar : float , x : Vector , y : Vector ) -> Vector:
    assert (
        isinstance(x , Vector )
        and isinstance(y , Vector )
        and (isinstance(scalar , (int, float) ))
    )
    return x * scalar + y


def random_vector( n : int , a : int , b : int ) -> Vector:
    random.seed(None )
    ans = [random.randint(a , b ) for _ in range(n )]
    return Vector(ans )
class Matrix:
    """simple docstring"""

    def __init__( self , matrix , w , h ):
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__( self ):
        ans = ''
        for i in range(self.__height ):
            ans += "|"
            for j in range(self.__width ):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j] ) + ","
                else:
                    ans += str(self.__matrix[i][j] ) + "|\n"
        return ans

    def __add__( self , other ):
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] + other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
        else:
            raise Exception('matrices must have the same dimension!' )

    def __sub__( self , other ):
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] - other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
        else:
            raise Exception('matrices must have the same dimension!' )

    @overload
    def __mul__( self , other : Vector ) -> Vector:
        ...

    @overload
    def __mul__( self , other : float ) -> Matrix:
        ...

    def __mul__( self , other ):
        if isinstance(other , Vector ):  # matrix-vector
            if len(other ) == self.__width:
                ans = zero_vector(self.__height )
                for i in range(self.__height ):
                    prods = [
                        self.__matrix[i][j] * other.component(j )
                        for j in range(self.__width )
                    ]
                    ans.change_component(i , sum(prods ) )
                return ans
            else:
                raise Exception(
                    'vector must have the same size as the '
                    'number of columns of the matrix!' )
        elif isinstance(other , (int, float) ):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width )]
                for i in range(self.__height )
            ]
            return Matrix(matrix , self.__width , self.__height )
        return None

    def height( self ):
        return self.__height

    def width( self ):
        return self.__width

    def component( self , x , y ):
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception('change_component: indices out of bounds' )

    def change_component( self , x , y , value ):
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception('change_component: indices out of bounds' )

    def minor( self , x , y ):
        if self.__height != self.__width:
            raise Exception('Matrix is not square' )
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor ) ):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor , self.__width - 1 , self.__height - 1 ).determinant()

    def cofactor( self , x , y ):
        if self.__height != self.__width:
            raise Exception('Matrix is not square' )
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x , y )
        else:
            raise Exception('Indices out of bounds' )

    def determinant( self ):
        if self.__height != self.__width:
            raise Exception('Matrix is not square' )
        if self.__height < 1:
            raise Exception('Matrix has no element' )
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0 , y ) for y in range(self.__width )
            ]
            return sum(cofactor_prods )
def square_zero_matrix( n : int ) -> Matrix:
    ans: list[list[float]] = [[0] * n for _ in range(n )]
    return Matrix(ans , n , n )


def random_matrix( width : int , height : int , a : int , b : int ) -> Matrix:
    random.seed(None )
    matrix: list[list[float]] = [
        [random.randint(a , b ) for _ in range(width )] for _ in range(height )
    ]
    return Matrix(matrix , width , height )
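# A short usage sketch of the classes above (added for illustration; expected values
# computed by hand):
v = Vector([1, 2, 3] )
w = Vector([4, 5, 6] )
print(v + w )                 # (5,7,9)
print(v * w )                 # 32 -- the dot product
print(v.euclidean_length() )  # sqrt(14) ~ 3.7417

m = Matrix([[1, 0], [0, 1]] , 2 , 2 )
print(m.determinant() )       # 1 for the 2x2 identity
print(m * Vector([1, 2] ) )   # (1,2)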
| 280 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def line_length( fnc :Callable[[int | float], int | float] , x_start :int | float , x_end :int | float , steps :int = 1_0_0 , ) -> float:
    '''simple docstring'''
    x1 = x_start
    fx1 = fnc(x_start )
    length = 0.0

    for _ in range(steps ):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2 )
        length += math.hypot(x2 - x1 , fx2 - fx1 )

        # Increment step
        x1 = x2
        fx1 = fx2

    return length
if __name__ == "__main__":
    def f( x :int | float ) -> int | float:
        '''simple docstring'''
        return math.sin(1_0 * x )
print("""f(x) = sin(10 * x)""")
print("""The length of the curve from x = -10 to x = 10 is:""")
    i = 1_0
while i <= 1_0_0_0_0_0:
print(F"""With {i} steps: {line_length(f, -1_0, 1_0, i)}""")
i *= 1_0
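# The quantity being approximated is the arc length of f on [x_start, x_end]
# (note added for clarity; each loop iteration adds one secant-segment hypotenuse):
#
#     L = \int_{x_start}^{x_end} \sqrt{1 + (f'(x))^2} dx
#       ~ \sum_i \sqrt{(x_i - x_{i-1})^2 + (f(x_i) - f(x_{i-1}))^2}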
| 352 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"""configuration_vit""": ["""VIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTConfig""", """ViTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_vit"""] = ["""ViTFeatureExtractor"""]
    _import_structure["""image_processing_vit"""] = ["""ViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_vit"""] = [
"""VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTForImageClassification""",
"""ViTForMaskedImageModeling""",
"""ViTModel""",
"""ViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_vit"""] = [
"""TFViTForImageClassification""",
"""TFViTModel""",
"""TFViTPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_vit"""] = [
"""FlaxViTForImageClassification""",
"""FlaxViTModel""",
"""FlaxViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__lowerCAmelCase : List[str] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 32 | 0 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp( self ):
        super().setUp()

    def get_tokenizer( self , **kwargs ):
        return self.tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **kwargs )

    def get_rust_tokenizer( self , **kwargs ):
        return self.rust_tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **kwargs )
    def get_chinese_input_output_texts( self ):
        input_text = '永和服装饰品有限公司,今天天气非常好'
        output_text = '永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'
        return input_text, output_text
    def test_tokenizer( self ):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text )
        self.assertListEqual(tokens , output_text.split() )
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , exp_tokens )

    def test_rust_tokenizer( self ):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text )
        self.assertListEqual(tokens , output_text.split() )
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , exp_tokens )
def lowerCAmelCase_ ( self: List[str] ) -> Optional[int]:
pass
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Union[str, Any]:
pass
def lowerCAmelCase_ ( self: str ) -> Any:
pass
| 307 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )

        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text = None , text_pair = None , boxes = None , word_labels = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                'You cannot provide bounding boxes '
                'if you initialized the image processor with apply_ocr set to True.' )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' )

        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['words']

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )

        # add pixel values
        images = features.pop('pixel_values' )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs['overflow_to_sample_mapping'] )
        encoded_inputs['image'] = images
        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
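# Illustrative usage (not part of the original module; the checkpoint name is an
# assumption for demonstration):
#     processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#     encoding = processor(images=image, return_tensors="pt")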
| 307 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
# NOTE: the concrete pipeline class names were stripped from this snippet, so the
# placeholder name `__A` is kept; each dummy class simply raises a helpful error
# via `requires_backends` when flax and transformers are not installed.
class __A(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class __A(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class __A(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class __A(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
| 106 | '''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)


if __name__ == "__main__":
    main()
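# Illustrative invocation (the script filename is an assumption):
#     python run_onnx_exporter.py --model_name_or_path facebook/bart-base --output_file_path BART.onnx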
| 106 | 1 |
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Convert rows of records into per-column lists of floats."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max normalise each column; weight 0 means lower is better, weight 1 means higher is better."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)

        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)

        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)

        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)

    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Add up the per-column scores for each row."""
    final_scores: list[float] = [0 for _ in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Score every row of `source_data` and append its final score as the last element."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)

    return source_data
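# Illustrative usage (rows are [price, comfort, year]; weight 0/1 marks whether
# lower/higher values are better):
#     vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
#     procentual_proximity(vehicles, [0, 0, 1])
#     # -> [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]]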
| 193 |
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}

test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Use depth-first search to return vertices in reverse topological order."""
    visited[vert] = True
    order = []

    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)

    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Use depth-first search to collect one strongly connected component."""
    visited[vert] = True
    component = [vert]

    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)

    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: DFS ordering on the graph, then DFS on the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
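# Illustrative usage (the ordering inside each component follows DFS visit order):
#     strongly_connected_components(test_graph_1)  # -> [[0, 1, 2], [3], [4]]
#     strongly_connected_components(test_graph_2)  # -> [[0, 2, 1], [3, 5, 4]]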
| 193 | 1 |
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
    if is_torch_available():
        import torch

    if is_tf_available():
        import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))

        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))

        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0

        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
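# Illustrative usage (the truncation patterns are taken from the upstream docstring):
#     tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
#     text = tokenizer.decode(generated_ids, truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])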
| 107 |
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration


REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None

    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield


@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield


@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model and load_from_checkpoint, which would otherwise download a model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield


def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
| 107 | 1 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration


@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self):
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
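# Illustrative minimal use of the FaissIndex API exercised above:
#     index = FaissIndex(string_factory="Flat")
#     index.add_vectors(np.eye(5, dtype=np.float32))
#     scores, ids = index.search(np.ones(5, dtype=np.float32))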
| 56 |
'''simple docstring'''
import math
import unittest
def is_prime(number: int) -> bool:
    """Check if a number is prime, in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
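# Illustrative checks (only 6k +/- 1 candidates need testing, which is why the
# loop above steps by 6):
#     is_prime(29)  # True
#     is_prime(87)  # False (3 * 29)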
class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
| 158 | 0 |
'''simple docstring'''
import argparse
import struct
import unittest
class SHA256:
    """Compute the SHA-256 hash of the given bytes, exposed as `self.hash`."""

    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initialize hash values
        self.hashes = [
0X6a09_e667,
0Xbb67_ae85,
0X3c6e_f372,
0Xa54f_f53a,
0X510e_527f,
0X9b05_688c,
0X1f83_d9ab,
0X5be0_cd19,
]
# Initialize round constants
        self.round_constants = [
0X428a_2f98,
0X7137_4491,
0Xb5c0_fbcf,
0Xe9b5_dba5,
0X3956_c25b,
0X59f1_11f1,
0X923f_82a4,
0Xab1c_5ed5,
0Xd807_aa98,
0X1283_5b01,
0X2431_85be,
0X550c_7dc3,
0X72be_5d74,
0X80de_b1fe,
0X9bdc_06a7,
0Xc19b_f174,
0Xe49b_69c1,
0Xefbe_4786,
0X0fc1_9dc6,
0X240c_a1cc,
0X2de9_2c6f,
0X4a74_84aa,
0X5cb0_a9dc,
0X76f9_88da,
0X983e_5152,
0Xa831_c66d,
0Xb003_27c8,
0Xbf59_7fc7,
0Xc6e0_0bf3,
0Xd5a7_9147,
0X06ca_6351,
0X1429_2967,
0X27b7_0a85,
0X2e1b_2138,
0X4d2c_6dfc,
0X5338_0d13,
0X650a_7354,
0X766a_0abb,
0X81c2_c92e,
0X9272_2c85,
0Xa2bf_e8a1,
0Xa81a_664b,
0Xc24b_8b70,
0Xc76c_51a3,
0Xd192_e819,
0Xd699_0624,
0Xf40e_3585,
0X106a_a070,
0X19a4_c116,
0X1e37_6c08,
0X2748_774c,
0X34b0_bcb5,
0X391c_0cb3,
0X4ed8_aa4a,
0X5b9c_ca4f,
0X682e_6ff3,
0X748f_82ee,
0X78a5_636f,
0X84c8_7814,
0X8cc7_0208,
0X90be_fffa,
0Xa450_6ceb,
0Xbef9_a3f7,
0Xc671_78f2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        # Split the preprocessed message into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )

                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_0000_0000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_0000_0000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_0000_0000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_0000_0000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_0000_0000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_0000_0000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit integer by the given number of rotations."""
        return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
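# Illustrative check against the standard library:
#     SHA256(b"Test String").hash == hashlib.sha256(b"Test String").hexdigest()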
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self):
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main():
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )
    args = parser.parse_args()

    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
| 5 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTMSNForImageClassification, ViTMSNModel
    from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
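# Illustrative inference mirroring the integration test above (checkpoint name
# taken from the test):
#     model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small")
#     processor = ViTImageProcessor.from_pretrained("facebook/vit-msn-small")
#     logits = model(**processor(images=prepare_img(), return_tensors="pt")).logits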
| 5 | 1 |
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
    from transformers.trainer import DEFAULT_CALLBACKS

    from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    "A callback that registers the events that go through."

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
def __a ( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : str = self.get_trainer()
snake_case__ : Union[str, Any] = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , _UpperCAmelCase )
# Callbacks passed at init are added to the default callbacks
snake_case__ : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(_UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _UpperCAmelCase )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5, evaluation_strategy="steps"
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
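# Note on the fixture used above (a sketch, inferred from the assertions, not
# from the visible source): MyTestTrainerCallback is expected to record the name
# of every TrainerCallback event it receives in an `events` list, which is what
# `trainer.callback_handler.callbacks[-2].events` is compared against
# `self.get_expected_events(trainer)`.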
| 143 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
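# Hypothetical invocation sketch (the model name and path below are placeholders,
# not from the original script); fire maps CLI flags onto save_len_file's arguments:
#   python save_len_file.py --tokenizer_name facebook/bart-large \
#       --data_dir ./wmt_en_ro --max_source_length 1024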
| 89 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
_import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mobilevit"] = [
"""MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileViTForImageClassification""",
"""MobileViTForSemanticSegmentation""",
"""MobileViTModel""",
"""MobileViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_mobilevit"] = [
"""TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileViTForImageClassification""",
"""TFMobileViTForSemanticSegmentation""",
"""TFMobileViTModel""",
"""TFMobileViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 353 |
'''simple docstring'''
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
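# Hypothetical CLI sketch (paths are placeholders, not from the source): convert a
# TF GPT-2 checkpoint into a pytorch weights file plus config under the given folder.
#   python convert_gpt2_checkpoint.py \
#       --gpt2_checkpoint_path ./models/gpt2/model.ckpt \
#       --pytorch_dump_folder_path ./gpt2-pytorch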
| 67 | 0 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
| 18 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class _A :
def __init__( self : int , _A : Optional[int] , _A : Any=13 , _A : List[Any]=7 , _A : List[Any]=True , _A : Optional[Any]=True , _A : str=True , _A : Any=True , _A : Dict=True , _A : Optional[Any]=False , _A : Any=False , _A : List[str]=False , _A : Optional[int]=2 , _A : List[Any]=99 , _A : str=0 , _A : Dict=32 , _A : Dict=5 , _A : List[Any]=4 , _A : Optional[Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[Any]=512 , _A : Optional[Any]=2 , _A : Optional[Any]=0.02 , _A : Optional[int]=2 , _A : Tuple=4 , _A : List[Any]="last" , _A : List[str]=True , _A : Tuple=None , _A : Optional[Any]=0 , ) -> Any:
"""simple docstring"""
lowercase : str = parent
lowercase : Optional[Any] = batch_size
lowercase : Union[str, Any] = seq_length
lowercase : str = is_training
lowercase : str = use_input_lengths
lowercase : List[Any] = use_token_type_ids
lowercase : Union[str, Any] = use_labels
lowercase : Tuple = gelu_activation
lowercase : Dict = sinusoidal_embeddings
lowercase : Any = causal
lowercase : str = asm
lowercase : Optional[Any] = n_langs
lowercase : Dict = vocab_size
lowercase : Dict = n_special
lowercase : List[Any] = hidden_size
lowercase : str = num_hidden_layers
lowercase : int = num_attention_heads
lowercase : str = hidden_dropout_prob
lowercase : Dict = attention_probs_dropout_prob
lowercase : List[Any] = max_position_embeddings
lowercase : Optional[int] = type_sequence_label_size
lowercase : List[str] = initializer_range
lowercase : List[str] = num_labels
lowercase : int = num_choices
lowercase : int = summary_type
lowercase : Tuple = use_proj
lowercase : Union[str, Any] = scope
lowercase : List[str] = bos_token_id
def __a ( self : Any ) -> Dict:
"""simple docstring"""
lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase : str = None
if self.use_input_lengths:
lowercase : int = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase : Union[str, Any] = None
if self.use_token_type_ids:
lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase : Union[str, Any] = None
lowercase : List[str] = None
lowercase : Optional[Any] = None
if self.use_labels:
lowercase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Tuple = ids_tensor([self.batch_size] , 2 ).float()
lowercase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowercase : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def __a ( self : int , _A : str , _A : Optional[Any] , _A : int , _A : List[str] , _A : Any , _A : Dict , _A : Tuple , _A : Union[str, Any] , _A : Tuple , ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = XLMModel(config=_A )
model.to(_A )
model.eval()
lowercase : Tuple = model(_A , lengths=_A , langs=_A )
lowercase : Dict = model(_A , langs=_A )
lowercase : int = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self : int , _A : Dict , _A : int , _A : int , _A : Union[str, Any] , _A : Tuple , _A : Union[str, Any] , _A : Any , _A : Union[str, Any] , _A : Dict , ) -> Optional[Any]:
"""simple docstring"""
lowercase : Optional[int] = XLMWithLMHeadModel(_A )
model.to(_A )
model.eval()
lowercase : Tuple = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self : Union[str, Any] , _A : List[str] , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[Any] , _A : int , _A : Union[str, Any] , _A : Tuple , _A : int , ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Dict = XLMForQuestionAnsweringSimple(_A )
model.to(_A )
model.eval()
lowercase : List[str] = model(_A )
lowercase : Any = model(_A , start_positions=_A , end_positions=_A )
lowercase : Any = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : List[Any] , _A : Union[str, Any] , _A : List[str] , _A : Any , _A : Any , _A : str , _A : Union[str, Any] , ) -> Dict:
"""simple docstring"""
lowercase : Optional[int] = XLMForQuestionAnswering(_A )
model.to(_A )
model.eval()
lowercase : Any = model(_A )
lowercase : Tuple = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , )
lowercase : Optional[int] = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , )
((lowercase) , ) : Optional[int] = result_with_labels.to_tuple()
lowercase : List[str] = model(_A , start_positions=_A , end_positions=_A )
((lowercase) , ) : Any = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __a ( self : Union[str, Any] , _A : Optional[int] , _A : Dict , _A : int , _A : List[Any] , _A : List[str] , _A : Optional[Any] , _A : Dict , _A : Optional[int] , _A : str , ) -> int:
"""simple docstring"""
lowercase : List[str] = XLMForSequenceClassification(_A )
model.to(_A )
model.eval()
lowercase : List[str] = model(_A )
lowercase : Union[str, Any] = model(_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self : Union[str, Any] , _A : str , _A : int , _A : List[str] , _A : Optional[int] , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Any , _A : Tuple , ) -> Dict:
"""simple docstring"""
lowercase : Optional[Any] = self.num_labels
lowercase : Tuple = XLMForTokenClassification(_A )
model.to(_A )
model.eval()
lowercase : str = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self : List[Any] , _A : List[str] , _A : Dict , _A : str , _A : List[str] , _A : List[str] , _A : Union[str, Any] , _A : Tuple , _A : Any , _A : Any , ) -> Union[str, Any]:
"""simple docstring"""
lowercase : int = self.num_choices
lowercase : List[Any] = XLMForMultipleChoice(config=_A )
model.to(_A )
model.eval()
lowercase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : Dict = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) : Union[str, Any] = config_and_inputs
lowercase : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
return config, inputs_dict
@require_torch
class _A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
_UpperCamelCase : Any = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCamelCase : str = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_UpperCamelCase : Tuple = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def __a ( self : List[Any] , _A : Tuple , _A : List[str] , _A : Dict , _A : Union[str, Any] , _A : Optional[Any] ) -> List[Any]:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __a ( self : Dict , _A : Tuple , _A : List[str] , _A : int=False ) -> Optional[Any]:
"""simple docstring"""
lowercase : List[str] = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
lowercase : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
lowercase : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
return inputs_dict
def __a ( self : Any ) -> List[str]:
"""simple docstring"""
lowercase : List[str] = XLMModelTester(self )
lowercase : Any = ConfigTester(self , config_class=_A , emb_dim=37 )
def __a ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*_A )
def __a ( self : Any ) -> Dict:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*_A )
def __a ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*_A )
def __a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*_A )
def __a ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*_A )
def __a ( self : Dict ) -> int:
"""simple docstring"""
lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*_A )
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*_A )
def __a ( self : int , _A : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : Optional[Any] , _A : List[Any] , _A : List[Any]=False , _A : Optional[int]=1 ) -> Any:
"""simple docstring"""
self.assertIsInstance(_A , _A )
self.assertListEqual(
[isinstance(_A , _A ) for iter_attentions in attentions] , [True] * len(_A ) )
self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(_A ):
# adds PAD dummy token
lowercase : List[Any] = min_length + idx + 1
lowercase : str = min_length + idx + 1
lowercase : Any = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_A ) )
def __a ( self : int , _A : Optional[int] , _A : Dict , _A : Any , _A : List[str] , _A : Optional[int] , _A : List[Any]=False , _A : List[Any]=1 ) -> str:
"""simple docstring"""
self.assertIsInstance(_A , _A )
self.assertListEqual(
[isinstance(_A , _A ) for iter_hidden_states in hidden_states] , [True] * len(_A ) , )
self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(_A ):
# adds PAD dummy token
lowercase : Union[str, Any] = min_length + idx + 1
lowercase : Optional[Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_A ) , )
pass
@slow
def __a ( self : Optional[int] ) -> Any:
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Any = XLMModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
class _A ( unittest.TestCase ):
@slow
def __a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
lowercase : Optional[int] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(_A )
lowercase : str = torch.tensor([[14, 447]] , dtype=torch.long , device=_A ) # the president
lowercase : List[str] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
lowercase : Dict = model.generate(_A , do_sample=_A )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _A )
| 308 | 0 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 363 |
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    """Find the area of the grid whose rectangle count is closest to the target."""
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
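# Sanity check for the solution above: a 36 x 77 grid contains
# T(36) * T(77) = 666 * 3003 = 1,999,998 rectangles, the closest achievable
# count to two million, so solution() should return 36 * 77 == 2772.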
| 173 | 0 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split the fetched dataset into its feature matrix and target vector
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
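# Optional extension (a sketch, not part of the original script): RMSE is often
# reported as well, since it is in the same units as the target variable.
#   rmse = mean_squared_error(y_test, predictions) ** 0.5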
| 67 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 4 | 0 |
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits in the number num!."""
    return sum(int(x) for x in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
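# Worked example: factorial(10) == 3628800 and 3+6+2+8+8+0+0 == 27,
# so solution(10) returns 27.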
| 137 |
from . import (
    albert,
    align,
    altclip,
    audio_spectrogram_transformer,
    auto,
    autoformer,
    bark,
    bart,
    barthez,
    bartpho,
    beit,
    bert,
    bert_generation,
    bert_japanese,
    bertweet,
    big_bird,
    bigbird_pegasus,
    biogpt,
    bit,
    blenderbot,
    blenderbot_small,
    blip,
    blip_2,
    bloom,
    bridgetower,
    byt5,
    camembert,
    canine,
    chinese_clip,
    clap,
    clip,
    clipseg,
    codegen,
    conditional_detr,
    convbert,
    convnext,
    convnextv2,
    cpm,
    cpmant,
    ctrl,
    cvt,
    data2vec,
    deberta,
    deberta_v2,
    decision_transformer,
    deformable_detr,
    deit,
    deprecated,
    deta,
    detr,
    dialogpt,
    dinat,
    distilbert,
    dit,
    donut,
    dpr,
    dpt,
    efficientformer,
    efficientnet,
    electra,
    encodec,
    encoder_decoder,
    ernie,
    ernie_m,
    esm,
    falcon,
    flaubert,
    flava,
    fnet,
    focalnet,
    fsmt,
    funnel,
    git,
    glpn,
    gpt2,
    gpt_bigcode,
    gpt_neo,
    gpt_neox,
    gpt_neox_japanese,
    gpt_sw3,
    gptj,
    gptsan_japanese,
    graphormer,
    groupvit,
    herbert,
    hubert,
    ibert,
    imagegpt,
    informer,
    instructblip,
    jukebox,
    layoutlm,
    layoutlmv2,
    layoutlmv3,
    layoutxlm,
    led,
    levit,
    lilt,
    llama,
    longformer,
    longt5,
    luke,
    lxmert,
    m2m_100,
    marian,
    markuplm,
    mask2former,
    maskformer,
    mbart,
    mbart50,
    mega,
    megatron_bert,
    megatron_gpt2,
    mgp_str,
    mluke,
    mobilebert,
    mobilenet_v1,
    mobilenet_v2,
    mobilevit,
    mobilevitv2,
    mpnet,
    mra,
    mt5,
    musicgen,
    mvp,
    nat,
    nezha,
    nllb,
    nllb_moe,
    nystromformer,
    oneformer,
    open_llama,
    openai,
    opt,
    owlvit,
    pegasus,
    pegasus_x,
    perceiver,
    phobert,
    pix2struct,
    plbart,
    poolformer,
    prophetnet,
    qdqbert,
    rag,
    realm,
    reformer,
    regnet,
    rembert,
    resnet,
    roberta,
    roberta_prelayernorm,
    roc_bert,
    roformer,
    rwkv,
    sam,
    segformer,
    sew,
    sew_d,
    speech_encoder_decoder,
    speech_to_text,
    speech_to_text_2,
    speecht5,
    splinter,
    squeezebert,
    swiftformer,
    swin,
    swin2sr,
    swinv2,
    switch_transformers,
    t5,
    table_transformer,
    tapas,
    time_series_transformer,
    timesformer,
    timm_backbone,
    transfo_xl,
    trocr,
    tvlt,
    umt5,
    unispeech,
    unispeech_sat,
    upernet,
    videomae,
    vilt,
    vision_encoder_decoder,
    vision_text_dual_encoder,
    visual_bert,
    vit,
    vit_hybrid,
    vit_mae,
    vit_msn,
    vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
    wavlm,
    whisper,
    x_clip,
    xglm,
    xlm,
    xlm_prophetnet,
    xlm_roberta,
    xlm_roberta_xl,
    xlnet,
    xmod,
    yolos,
    yoso,
)
| 137 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
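# Usage sketch, assuming the reconstruction above: the defaults reproduce the
# 345M-parameter megatron-bert configuration (24 layers, 16 heads, hidden size 1024).
#   config = MegatronBertConfig()
#   assert config.hidden_size == 1024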
| 324 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 331 | 0 |
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
_CITATION = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def intersect_and_union(
    pred_label,
    label,
    num_labels,
    ignore_index,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = label != ignore_index
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
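# Tiny worked example for intersect_and_union, assuming the reconstruction above:
# with pred_label=[[0, 1]], label=[[0, 0]], num_labels=2 and ignore_index=255,
# class 0 gets area_intersect=1 and area_union=2 (IoU 0.5), while class 1 gets
# area_intersect=0 and area_union=1 (IoU 0.0).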
def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label


def mean_iou(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index,
    nan_to_num: Optional[int] = None,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels: int,
        ignore_index: bool,
        nan_to_num: Optional[int] = None,
        label_map: Optional[Dict[int, int]] = None,
        reduce_labels: bool = False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
| 305 |
from ....utils import logging
logger = logging.get_logger(__name__)


class MMBTConfig:
    """Copies the wrapped config's attributes and adds a modal hidden size."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 305 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
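# Example, assuming the reconstruction above: with the defaults embed_dim=96 and
# depths=[2, 2, 6, 2], the channel dimension after the last stage is
# 96 * 2 ** 3 == 768, which is the value hidden_size is set to.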
| 158 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
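# Usage sketch (hedged: joblibspark and pyspark are assumed to be installed, as the
# require_joblibspark / require_not_windows markers above suggest): inside
# parallel_backend("spark"), map_nested dispatches its work through joblib's Spark
# backend instead of local multiprocessing.
#   with parallel_backend("spark"):
#       assert map_nested(add_one, [1, 2, 3], num_proc=2) == [2, 3, 4]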
| 209 | 0 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 351 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings, specifically avoiding
    whitespace/control characters that the BPE code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
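# Example, assuming the reconstruction above: printable bytes map to themselves,
# while the rest are shifted past 255, so the space byte (32) maps to
# chr(256 + 32) == "Ġ" -- the familiar prefix on GPT-2/BART-style BPE tokens.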
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in a word, where the word is
    represented as a tuple of symbols (variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
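# Example, assuming the reconstruction above:
#   get_pairs(("h", "e", "l", "l", "o"))
#   == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}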
class LEDTokenizer(PreTrainedTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs, ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad(self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
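

# --- added sketch (not part of the original module) ---
# Illustrates what the `_pad` override above does on plain lists: when padding
# on the right, `global_attention_mask` is extended with -1, marking padding
# positions as neither local nor global attention. The values are made up.
def _demo_global_attention_padding():
    encoded = {"input_ids": [0, 8, 9, 2, 1, 1], "global_attention_mask": [1, 0, 0, 0]}
    difference = len(encoded["input_ids"]) - len(encoded["global_attention_mask"])
    encoded["global_attention_mask"] = encoded["global_attention_mask"] + [-1] * difference
    assert encoded["global_attention_mask"] == [1, 0, 0, 0, -1, -1]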
| 158 | 0 |
'''simple docstring'''
INSTALL_CONTENT = '''
# How to install Transformers
! pip install transformers datasets
# To install from source instead of the last release, comment out the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''

notebook_first_cells = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
black_avoid_patterns = {
    '''{processor_class}''': '''FakeProcessorClass''',
    '''{model_class}''': '''FakeModelClass''',
    '''{object_class}''': '''FakeObjectClass''',
}
| 276 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
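

# --- added sketch (not part of the original module) ---
# What the ONNX `inputs` property above resolves to for the default task; this
# assumes the base OnnxConfig constructor accepts the model config directly.
def _demo_onnx_inputs():
    onnx_config = Data2VecTextOnnxConfig(Data2VecTextConfig())
    assert dict(onnx_config.inputs) == {
        "input_ids": {0: "batch", 1: "sequence"},
        "attention_mask": {0: "batch", 1: "sequence"},
    }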
| 276 | 1 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt', )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE, drop_last=(accelerator.mixed_precision == 'fp8'), )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])

    metric = evaluate.load('glue', 'mrpc')

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
            metric.add_batch(
                predictions=predictions, references=references, )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'epoch {epoch}:', eval_metric)
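

# --- added sketch (not part of the original script) ---
# Worked example of the gradient-accumulation arithmetic used in
# `training_function` above: a requested batch size of 64 with
# MAX_GPU_BATCH_SIZE = 16 becomes 4 accumulation steps of 16 samples each,
# so the effective batch size stays 64.
def _demo_gradient_accumulation_math():
    requested_batch_size = 64
    gradient_accumulation_steps = requested_batch_size // MAX_GPU_BATCH_SIZE
    assert gradient_accumulation_steps == 4
    assert gradient_accumulation_steps * MAX_GPU_BATCH_SIZE == requested_batch_size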
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.', )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 353 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class DataTrainingArguments:
    """simple docstring"""

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"} )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."} )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."} )
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."} )
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."} )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        }, )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """simple docstring"""

    model_name_or_path: str = field(
        default=None, metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        }, )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"} )
    config_overrides: Optional[str] = field(
        default=None, metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        }, )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."} )
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        }, )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."} )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."} )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    """simple docstring"""

    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} )
def collate_fn(examples):
    pixel_values = torch.stack([example['pixel_values'] for example in examples])
    return {"pixel_values": pixel_values}
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_mae', model_args, data_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if 'validation' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds['train'].train_test_split(data_args.train_val_split)
        ds['train'] = split['train']
        ds['validation'] = split['test']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning('You are instantiating a new config instance from scratch.')
        if model_args.config_overrides is not None:
            logger.info(F'Overriding config: {model_args.config_overrides}')
            config.update_from_string(model_args.config_overrides)
            logger.info(F'New config: {config}')

    # adapt config
    config.update(
        {
            'mask_ratio': model_args.mask_ratio,
            'norm_pix_loss': model_args.norm_pix_loss,
        } )
# create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info('Training new model from scratch')
        model = ViTMAEForPreTraining(config)
    if training_args.do_train:
        column_names = ds['train'].column_names
    else:
        column_names = ds['validation'].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = 'image'
    elif "img" in column_names:
        image_column_name = 'img'
    else:
        image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
__UpperCamelCase =image_processor.size['shortest_edge']
else:
__UpperCamelCase =(image_processor.size['height'], image_processor.size['width'])
__UpperCamelCase =Compose(
[
Lambda(lambda SCREAMING_SNAKE_CASE__ : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(SCREAMING_SNAKE_CASE__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
    def preprocess_images(examples):
        examples['pixel_values'] = [transforms(image) for image in examples[image_column_name]]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError('--do_train requires a train dataset')
        if data_args.max_train_samples is not None:
            ds['train'] = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError('--do_eval requires a validation dataset')
        if data_args.max_eval_samples is not None:
            ds['validation'] = (
                ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=image_processor , data_collator=collate_fn , )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics('train', train_result.metrics)
        trainer.save_metrics('train', train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        'tasks': 'masked-auto-encoding',
        'dataset': data_args.dataset_name,
        'tags': ['masked-auto-encoding'],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
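

# --- added sketch (not part of the original script) ---
# Worked example of the absolute-learning-rate rule used above
# (absolute_lr = base_lr * total_batch_size / 256); the batch size is made up.
def _demo_absolute_lr():
    import math

    base_learning_rate = 1e-3
    total_train_batch_size = 512  # per-device batch * grad accumulation * world size
    assert math.isclose(base_learning_rate * total_train_batch_size / 256, 2e-3)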
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 117 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_mobilevit'] = ['MobileViTFeatureExtractor']
    _import_structure['image_processing_mobilevit'] = ['MobileViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mobilevit'] = [
        'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MobileViTForImageClassification',
        'MobileViTForSemanticSegmentation',
        'MobileViTModel',
        'MobileViTPreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mobilevit'] = [
        'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFMobileViTForImageClassification',
        'TFMobileViTForSemanticSegmentation',
        'TFMobileViTModel',
        'TFMobileViTPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 235 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json',
    'YituTech/conv-bert-medium-small': (
        'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'
    ),
    'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json',
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class ConvBertConfig(PretrainedConfig):
    model_type = '''convbert'''

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, embedding_size=768, head_ratio=2, conv_kernel_size=9, num_groups=1, classifier_dropout=None, **kwargs, ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ] )
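

# --- added sketch (not part of the original module) ---
# With the defaults above, head_ratio=2 means ConvBERT keeps
# num_attention_heads // head_ratio = 6 standard self-attention heads and
# serves the remaining capacity with its cheaper span-based convolution branch.
def _demo_convbert_defaults():
    config = ConvBertConfig()
    assert config.num_attention_heads // config.head_ratio == 6
    assert config.conv_kernel_size == 9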
| 32 | 0 |
"""simple docstring"""
def catalan_numbers(upper_limit: int) -> "list[int]":
    if upper_limit < 0:
        raise ValueError("""Limit for the Catalan sequence must be ≥ 0""")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
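

# --- added sketch (not part of the original file) ---
# Cross-check of the DP recurrence above against the closed form
# C(n) = comb(2n, n) / (n + 1), which is always an exact integer.
def _check_against_closed_form(limit: int = 10) -> None:
    from math import comb

    dp = catalan_numbers(limit)
    for n in range(limit + 1):
        assert dp[n] == comb(2 * n, n) // (n + 1)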
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
            N = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(F'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
| 54 | """simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''OwlViTImageProcessor'''
    tokenizer_class = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''', FutureWarning, )
            feature_extractor = kwargs.pop('''feature_extractor''')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                '''You have to specify at least one text or query image or image. All three cannot be none.''')

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [''' '''] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''')

            if return_tensors == "np":
                input_ids = np.concatenate([encoding['''input_ids'''] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding['''attention_mask'''] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding['''input_ids'''] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding['''attention_mask'''] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding['''input_ids'''] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding['''attention_mask'''] for encoding in encodings], axis=0)

            else:
                raise ValueError('''Target return tensor type could not be returned''')

            encoding = BatchEncoding()
            encoding['''input_ids'''] = input_ids
            encoding['''attention_mask'''] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding['''query_pixel_values'''] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', FutureWarning, )
        return self.image_processor
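

# --- added usage sketch (not part of the original module) ---
# Typical call pattern for the processor above; the checkpoint name and query
# texts are illustrative, and `image` would be a PIL image loaded elsewhere.
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     inputs = processor(
#         text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt"
#     )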
| 54 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained('''jplu/tf-camembert-base''')

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]], dtype=tf.int32, )  # J'aime le camembert !
        output = model(input_ids)['''last_hidden_state''']
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]], dtype=tf.float32, )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 106 |
"""simple docstring"""
def different_signs(num_a, num_b):
    return num_a ^ num_b < 0
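

# --- added sketch (not part of the original file) ---
# Why the XOR trick works: the sign bit of `a ^ b` is set exactly when the two
# operands' sign bits differ, and Python preserves this for negative ints.
def _demo_different_signs():
    assert different_signs(1, -1)
    assert not different_signs(-7, -3)
    assert not different_signs(5, 9)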
if __name__ == "__main__":
import doctest
doctest.testmod()
| 106 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/biogpt''': '''https://huggingface.co/microsoft/biogpt/resolve/main/config.json''',
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    model_type = '''biogpt'''

    def __init__(self, vocab_size=42384, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, scale_embedding=True, use_cache=True, layerdrop=0.0, activation_dropout=0.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
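

# --- added sketch (not part of the original module) ---
# The defaults above mirror the microsoft/biogpt checkpoint; overriding one
# field leaves the rest untouched.
def _demo_biogpt_config():
    config = BioGptConfig(num_hidden_layers=2)
    assert config.vocab_size == 42384
    assert config.num_hidden_layers == 2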
| 279 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
        default=128, metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
    pad_to_max_length: bool = field(
        default=True, metadata={
            '''help''': (
                '''Whether to pad all samples to `max_seq_length`. '''
                '''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
            )
        }, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        }, )
    max_predict_samples: Optional[int] = field(
        default=None, metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of prediction examples to this '''
                '''value if set.'''
            )
        }, )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    language: str = field(
        default=None, metadata={'''help''': '''Evaluation language. Also train language if `train_language` is set to None.'''} )
    train_language: Optional[str] = field(
        default=None, metadata={'''help''': '''Train language if it is different from the evaluation language.'''} )
    config_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )
    do_lower_case: Optional[bool] = field(
        default=False, metadata={'''help''': '''arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'''}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''}, )
    model_revision: str = field(
        default='''main''', metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''}, )
    use_auth_token: bool = field(
        default=False, metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        }, )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''}, )
def main():
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_xnli''', model_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                '''xnli''' , model_args.language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        else:
            train_dataset = load_dataset(
                '''xnli''' , model_args.train_language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = train_dataset.features['''label'''].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            '''xnli''' , model_args.language , split='''validation''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = eval_dataset.features['''label'''].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            '''xnli''' , model_args.language , split='''test''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = predict_dataset.features['''label'''].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label={str(i): label for i, label in enumerate(label_list)} , label2id={label: i for i, label in enumerate(label_list)} , finetuning_task='''xnli''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = '''max_length'''
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples['''premise'''] , examples['''hypothesis'''] , padding=padding , max_length=data_args.max_seq_length , truncation=True , )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc='''train dataset map pre-processing'''):
            train_dataset = train_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on train dataset''' , )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''')

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc='''validation dataset map pre-processing'''):
            eval_dataset = eval_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on validation dataset''' , )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc='''prediction dataset map pre-processing'''):
            predict_dataset = predict_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on prediction dataset''' , )
# Get the metric function
snake_case_ : int = evaluate.load('''xnli''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary mapping metric names (strings) to floats.
def compute_metrics(_UpperCamelCase ):
snake_case_ : List[str] = p.predictions[0] if isinstance(p.predictions , _UpperCamelCase ) else p.predictions
snake_case_ : Tuple = np.argmax(_UpperCamelCase , axis=1 )
return metric.compute(predictions=_UpperCamelCase , references=p.label_ids )
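# e.g. for a 3-way NLI batch, predictions have shape (batch_size, 3); argmax yields class
# ids, and metric.compute(...) returns a dict such as {'accuracy': 0.83} (value illustrative).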
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
snake_case_ : Optional[int] = default_data_collator
elif training_args.fpaa:
snake_case_ : Any = DataCollatorWithPadding(_UpperCamelCase , pad_to_multiple_of=8 )
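# pad_to_multiple_of=8 keeps padded sequence lengths Tensor-Core friendly under fp16.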
else:
snake_case_ : Any = None
# Initialize our Trainer
snake_case_ : Any = Trainer(
model=_UpperCamelCase , args=_UpperCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_UpperCamelCase , tokenizer=_UpperCamelCase , data_collator=_UpperCamelCase , )
# Training
if training_args.do_train:
snake_case_ : int = None
if training_args.resume_from_checkpoint is not None:
snake_case_ : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
snake_case_ : Dict = last_checkpoint
snake_case_ : int = trainer.train(resume_from_checkpoint=_UpperCamelCase )
snake_case_ : Union[str, Any] = train_result.metrics
snake_case_ : Union[str, Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_UpperCamelCase )
)
snake_case_ : Dict = min(_UpperCamelCase , len(_UpperCamelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , _UpperCamelCase )
trainer.save_metrics('''train''' , _UpperCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
snake_case_ : Any = trainer.evaluate(eval_dataset=_UpperCamelCase )
snake_case_ : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_UpperCamelCase )
snake_case_ : str = min(_UpperCamelCase , len(_UpperCamelCase ) )
trainer.log_metrics('''eval''' , _UpperCamelCase )
trainer.save_metrics('''eval''' , _UpperCamelCase )
# Prediction
if training_args.do_predict:
logger.info('''*** Predict ***''' )
snake_case_ , snake_case_ , snake_case_ : Optional[int] = trainer.predict(_UpperCamelCase , metric_key_prefix='''predict''' )
snake_case_ : Union[str, Any] = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(_UpperCamelCase )
)
snake_case_ : Optional[int] = min(_UpperCamelCase , len(_UpperCamelCase ) )
trainer.log_metrics('''predict''' , _UpperCamelCase )
trainer.save_metrics('''predict''' , _UpperCamelCase )
snake_case_ : List[Any] = np.argmax(_UpperCamelCase , axis=1 )
snake_case_ : Optional[Any] = os.path.join(training_args.output_dir , '''predictions.txt''' )
if trainer.is_world_process_zero():
with open(_UpperCamelCase , '''w''' ) as writer:
writer.write('''index\tprediction\n''' )
for index, item in enumerate(_UpperCamelCase ):
snake_case_ : List[str] = label_list[item]
writer.write(f'''{index}\t{item}\n''' )
if __name__ == "__main__":
main()
| 279 | 1 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
__lowerCAmelCase : str = 'CompVis/stable-diffusion-v1-1'
__lowerCAmelCase : int = 'CompVis/stable-diffusion-v1-2'
__lowerCAmelCase : List[str] = 'CompVis/stable-diffusion-v1-3'
__lowerCAmelCase : str = 'CompVis/stable-diffusion-v1-4'
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : str , __lowerCamelCase : AutoencoderKL , __lowerCamelCase : CLIPTextModel , __lowerCamelCase : CLIPTokenizer , __lowerCamelCase : UNetaDConditionModel , __lowerCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __lowerCamelCase : StableDiffusionSafetyChecker , __lowerCamelCase : CLIPImageProcessor , __lowerCamelCase : bool = True , ) -> List[Any]:
super().__init__()
a = StableDiffusionPipeline.from_pretrained(__lowerCamelCase )
a = StableDiffusionPipeline.from_pretrained(__lowerCamelCase )
a = StableDiffusionPipeline.from_pretrained(__lowerCamelCase )
a = StableDiffusionPipeline(
vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , unet=__lowerCamelCase , scheduler=__lowerCamelCase , safety_checker=__lowerCamelCase , feature_extractor=__lowerCamelCase , requires_safety_checker=__lowerCamelCase , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def __UpperCAmelCase ( self : Optional[int] ) -> Dict[str, Any]:
return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith("_" )}
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Optional[Union[str, int]] = "auto" ) -> Dict:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
self.enable_attention_slicing(__lowerCamelCase )
@torch.no_grad()
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Union[str, List[str]] , __lowerCamelCase : int = 5_12 , __lowerCamelCase : int = 5_12 , __lowerCamelCase : int = 50 , __lowerCamelCase : float = 7.5 , __lowerCamelCase : Optional[Union[str, List[str]]] = None , __lowerCamelCase : Optional[int] = 1 , __lowerCamelCase : float = 0.0 , __lowerCamelCase : Optional[torch.Generator] = None , __lowerCamelCase : Optional[torch.FloatTensor] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowerCamelCase : int = 1 , **__lowerCamelCase : str , ) -> Union[str, Any]:
return self.pipea(
prompt=__lowerCamelCase , height=__lowerCamelCase , width=__lowerCamelCase , num_inference_steps=__lowerCamelCase , guidance_scale=__lowerCamelCase , negative_prompt=__lowerCamelCase , num_images_per_prompt=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , latents=__lowerCamelCase , output_type=__lowerCamelCase , return_dict=__lowerCamelCase , callback=__lowerCamelCase , callback_steps=__lowerCamelCase , **__lowerCamelCase , )
@torch.no_grad()
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Union[str, List[str]] , __lowerCamelCase : int = 5_12 , __lowerCamelCase : int = 5_12 , __lowerCamelCase : int = 50 , __lowerCamelCase : float = 7.5 , __lowerCamelCase : Optional[Union[str, List[str]]] = None , __lowerCamelCase : Optional[int] = 1 , __lowerCamelCase : float = 0.0 , __lowerCamelCase : Optional[torch.Generator] = None , __lowerCamelCase : Optional[torch.FloatTensor] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowerCamelCase : int = 1 , **__lowerCamelCase : Any , ) -> Tuple:
return self.pipea(
prompt=__lowerCamelCase , height=__lowerCamelCase , width=__lowerCamelCase , num_inference_steps=__lowerCamelCase , guidance_scale=__lowerCamelCase , negative_prompt=__lowerCamelCase , num_images_per_prompt=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , latents=__lowerCamelCase , output_type=__lowerCamelCase , return_dict=__lowerCamelCase , callback=__lowerCamelCase , callback_steps=__lowerCamelCase , **__lowerCamelCase , )
@torch.no_grad()
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Union[str, List[str]] , __lowerCamelCase : int = 5_12 , __lowerCamelCase : int = 5_12 , __lowerCamelCase : int = 50 , __lowerCamelCase : float = 7.5 , __lowerCamelCase : Optional[Union[str, List[str]]] = None , __lowerCamelCase : Optional[int] = 1 , __lowerCamelCase : float = 0.0 , __lowerCamelCase : Optional[torch.Generator] = None , __lowerCamelCase : Optional[torch.FloatTensor] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowerCamelCase : int = 1 , **__lowerCamelCase : int , ) -> List[str]:
return self.pipea(
prompt=__lowerCamelCase , height=__lowerCamelCase , width=__lowerCamelCase , num_inference_steps=__lowerCamelCase , guidance_scale=__lowerCamelCase , negative_prompt=__lowerCamelCase , num_images_per_prompt=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , latents=__lowerCamelCase , output_type=__lowerCamelCase , return_dict=__lowerCamelCase , callback=__lowerCamelCase , callback_steps=__lowerCamelCase , **__lowerCamelCase , )
@torch.no_grad()
def __UpperCAmelCase ( self : str , __lowerCamelCase : Union[str, List[str]] , __lowerCamelCase : int = 5_12 , __lowerCamelCase : int = 5_12 , __lowerCamelCase : int = 50 , __lowerCamelCase : float = 7.5 , __lowerCamelCase : Optional[Union[str, List[str]]] = None , __lowerCamelCase : Optional[int] = 1 , __lowerCamelCase : float = 0.0 , __lowerCamelCase : Optional[torch.Generator] = None , __lowerCamelCase : Optional[torch.FloatTensor] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowerCamelCase : int = 1 , **__lowerCamelCase : Tuple , ) -> str:
return self.pipea(
prompt=__lowerCamelCase , height=__lowerCamelCase , width=__lowerCamelCase , num_inference_steps=__lowerCamelCase , guidance_scale=__lowerCamelCase , negative_prompt=__lowerCamelCase , num_images_per_prompt=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , latents=__lowerCamelCase , output_type=__lowerCamelCase , return_dict=__lowerCamelCase , callback=__lowerCamelCase , callback_steps=__lowerCamelCase , **__lowerCamelCase , )
@torch.no_grad()
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Union[str, List[str]] , __lowerCamelCase : int = 5_12 , __lowerCamelCase : int = 5_12 , __lowerCamelCase : int = 50 , __lowerCamelCase : float = 7.5 , __lowerCamelCase : Optional[Union[str, List[str]]] = None , __lowerCamelCase : Optional[int] = 1 , __lowerCamelCase : float = 0.0 , __lowerCamelCase : Optional[torch.Generator] = None , __lowerCamelCase : Optional[torch.FloatTensor] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowerCamelCase : int = 1 , **__lowerCamelCase : List[Any] , ) -> List[str]:
a = "cuda" if torch.cuda.is_available() else "cpu"
self.to(__lowerCamelCase )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
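# The VAE downsamples by a factor of 8, so non-multiples of 8 would yield fractional latent dimensions.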
# Get first result from Stable Diffusion Checkpoint v1.1
a = self.textaimg_sda_a(
prompt=__lowerCamelCase , height=__lowerCamelCase , width=__lowerCamelCase , num_inference_steps=__lowerCamelCase , guidance_scale=__lowerCamelCase , negative_prompt=__lowerCamelCase , num_images_per_prompt=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , latents=__lowerCamelCase , output_type=__lowerCamelCase , return_dict=__lowerCamelCase , callback=__lowerCamelCase , callback_steps=__lowerCamelCase , **__lowerCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.2
a = self.textaimg_sda_a(
prompt=__lowerCamelCase , height=__lowerCamelCase , width=__lowerCamelCase , num_inference_steps=__lowerCamelCase , guidance_scale=__lowerCamelCase , negative_prompt=__lowerCamelCase , num_images_per_prompt=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , latents=__lowerCamelCase , output_type=__lowerCamelCase , return_dict=__lowerCamelCase , callback=__lowerCamelCase , callback_steps=__lowerCamelCase , **__lowerCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.3
a = self.textaimg_sda_a(
prompt=__lowerCamelCase , height=__lowerCamelCase , width=__lowerCamelCase , num_inference_steps=__lowerCamelCase , guidance_scale=__lowerCamelCase , negative_prompt=__lowerCamelCase , num_images_per_prompt=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , latents=__lowerCamelCase , output_type=__lowerCamelCase , return_dict=__lowerCamelCase , callback=__lowerCamelCase , callback_steps=__lowerCamelCase , **__lowerCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.4
a = self.textaimg_sda_a(
prompt=__lowerCamelCase , height=__lowerCamelCase , width=__lowerCamelCase , num_inference_steps=__lowerCamelCase , guidance_scale=__lowerCamelCase , negative_prompt=__lowerCamelCase , num_images_per_prompt=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , latents=__lowerCamelCase , output_type=__lowerCamelCase , return_dict=__lowerCamelCase , callback=__lowerCamelCase , callback_steps=__lowerCamelCase , **__lowerCamelCase , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
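# Hypothetical usage sketch (class name, variable names and prompt are illustrative, not part of this file):
# pipe = StableDiffusionComparisonPipeline(vae, text_encoder, tokenizer, unet, scheduler,
#                                          safety_checker, feature_extractor)
# out = pipe("an astronaut riding a horse", num_inference_steps=25)
# out.images then holds one image per checkpoint v1.1-v1.4 for side-by-side comparison.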
| 107 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
__lowerCAmelCase : List[Any] = '0.12' # assumed parallelism: 8
if is_torch_available():
import torch
def __magic_name__ ( A : Dict, A : Union[str, Any], A : Optional[int]=None ):
'''simple docstring'''
if rng is None:
a = random.Random()
a = 1
for dim in shape:
total_dims *= dim
a = []
for _ in range(A ):
values.append(rng.randint(0, vocab_size - 1 ) )
a = np.array(A, dtype=jnp.intaa ).reshape(A )
return output
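# e.g. ids_tensor((2, 3), vocab_size=10) -> integer array of shape (2, 3) with values in [0, 9].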
def __magic_name__ ( A : Dict, A : Union[str, Any]=None ):
'''simple docstring'''
a = ids_tensor(A, vocab_size=2, rng=A )
# make sure that at least one token is attended to for each batch
a[:, -1] = 1
return attn_mask
@require_flax
class snake_case__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = None
SCREAMING_SNAKE_CASE_ : Any = ()
def __UpperCAmelCase ( self : int ) -> List[str]:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 2
a = 2
a = inputs["input_ids"].shape[-1] // 2
a = inputs["input_ids"][:max_batch_size, :sequence_length]
a = jnp.ones_like(__lowerCamelCase )
a = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
a = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
a = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
a , a , a , a = self._get_input_ids_and_config()
a = False
a = max_length
a = 0
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model_class.__name__[4:] # Skip the "Flax" at the beginning
a = getattr(__lowerCamelCase , __lowerCamelCase )
a = pt_model_class(__lowerCamelCase ).eval()
a = load_flax_weights_in_pytorch_model(__lowerCamelCase , flax_model.params )
a = flax_model.generate(__lowerCamelCase ).sequences
a = pt_model.generate(torch.tensor(__lowerCamelCase , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
a = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a , a , a , a = self._get_input_ids_and_config()
a = False
a = max_length
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
a , a , a , a = self._get_input_ids_and_config()
a = True
a = max_length
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : int ) -> Dict:
a , a , a , a = self._get_input_ids_and_config()
a = False
a = max_length
a = 2
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
a , a , a , a = self._get_input_ids_and_config()
a = False
a = max_length
a = 2
a = 2
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def __UpperCAmelCase ( self : Optional[Any] ) -> Dict:
a , a , a , a = self._get_input_ids_and_config()
a = True
a = max_length
a = 0.8
a = 10
a = 0.3
a = 1
a = 8
a = 9
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
a , a , a , a = self._get_input_ids_and_config()
a = max_length
a = 1
a = 8
a = 9
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
a , a , a , a = self._get_input_ids_and_config()
a = max_length
a = 2
a = 1
a = 8
a = 9
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
a , a , a , a = self._get_input_ids_and_config()
# pad attention mask on the left
a = attention_mask.at[(0, 0)].set(0 )
a = False
a = max_length
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Tuple ) -> Tuple:
a , a , a , a = self._get_input_ids_and_config()
# pad attention mask on the left
a = attention_mask.at[(0, 0)].set(0 )
a = True
a = max_length
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
a , a , a , a = self._get_input_ids_and_config()
# pad attention mask on the left
a = attention_mask.at[(0, 0)].set(0 )
a = 2
a = max_length
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
a = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" )
a = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
a = "Hello world"
a = tokenizer(__lowerCamelCase , return_tensors="np" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(__lowerCamelCase , "do_samples" ):
model.generate(__lowerCamelCase , do_samples=__lowerCamelCase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(__lowerCamelCase , "foo" ):
a = {"foo": "bar"}
model.generate(__lowerCamelCase , **__lowerCamelCase )
| 107 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
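# The guarded try/except blocks below register optional submodules only when their backend (tokenizers, torch, TF, flax) is importable.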
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['''DistilBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 361 | """simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__A = logging.get_logger(__name__)
__A = {
'''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class _snake_case ( a__ ):
snake_case__ = "deta"
snake_case__ = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self : Any , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Dict=900 , UpperCAmelCase : str=2048 , UpperCAmelCase : Optional[Any]=6 , UpperCAmelCase : Optional[Any]=2048 , UpperCAmelCase : Optional[Any]=8 , UpperCAmelCase : Union[str, Any]=6 , UpperCAmelCase : int=1024 , UpperCAmelCase : str=8 , UpperCAmelCase : Dict=0.0 , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Optional[Any]="relu" , UpperCAmelCase : List[str]=256 , UpperCAmelCase : int=0.1 , UpperCAmelCase : int=0.0 , UpperCAmelCase : Any=0.0 , UpperCAmelCase : List[Any]=0.0_2 , UpperCAmelCase : Optional[Any]=1.0 , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : Dict=False , UpperCAmelCase : Optional[int]="sine" , UpperCAmelCase : Tuple=5 , UpperCAmelCase : Any=4 , UpperCAmelCase : Dict=4 , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Dict=300 , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Tuple=True , UpperCAmelCase : Union[str, Any]=1 , UpperCAmelCase : str=5 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : str=1 , UpperCAmelCase : Tuple=1 , UpperCAmelCase : Tuple=5 , UpperCAmelCase : List[Any]=2 , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : Tuple=0.2_5 , **UpperCAmelCase : List[str] , ):
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
__lowerCamelCase : Tuple = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
else:
if isinstance(UpperCAmelCase , UpperCAmelCase ):
__lowerCamelCase : List[Any] = backbone_config.pop("model_type" )
__lowerCamelCase : Any = CONFIG_MAPPING[backbone_model_type]
__lowerCamelCase : List[Any] = config_class.from_dict(UpperCAmelCase )
__lowerCamelCase : List[str] = backbone_config
__lowerCamelCase : Any = num_queries
__lowerCamelCase : Tuple = max_position_embeddings
__lowerCamelCase : Dict = d_model
__lowerCamelCase : List[str] = encoder_ffn_dim
__lowerCamelCase : int = encoder_layers
__lowerCamelCase : str = encoder_attention_heads
__lowerCamelCase : Dict = decoder_ffn_dim
__lowerCamelCase : Tuple = decoder_layers
__lowerCamelCase : str = decoder_attention_heads
__lowerCamelCase : Dict = dropout
__lowerCamelCase : List[Any] = attention_dropout
__lowerCamelCase : int = activation_dropout
__lowerCamelCase : int = activation_function
__lowerCamelCase : Any = init_std
__lowerCamelCase : Optional[Any] = init_xavier_std
__lowerCamelCase : int = encoder_layerdrop
__lowerCamelCase : Dict = auxiliary_loss
__lowerCamelCase : Optional[int] = position_embedding_type
# deformable attributes
__lowerCamelCase : Tuple = num_feature_levels
__lowerCamelCase : str = encoder_n_points
__lowerCamelCase : List[str] = decoder_n_points
__lowerCamelCase : List[str] = two_stage
__lowerCamelCase : Dict = two_stage_num_proposals
__lowerCamelCase : int = with_box_refine
__lowerCamelCase : Union[str, Any] = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
__lowerCamelCase : str = class_cost
__lowerCamelCase : Optional[Any] = bbox_cost
__lowerCamelCase : Tuple = giou_cost
# Loss coefficients
__lowerCamelCase : List[Any] = mask_loss_coefficient
__lowerCamelCase : Dict = dice_loss_coefficient
__lowerCamelCase : Any = bbox_loss_coefficient
__lowerCamelCase : Dict = giou_loss_coefficient
__lowerCamelCase : Optional[Any] = eos_coefficient
__lowerCamelCase : Optional[int] = focal_alpha
super().__init__(is_encoder_decoder=UpperCAmelCase , **UpperCAmelCase )
@property
def lowerCamelCase__ ( self : Dict ):
return self.encoder_attention_heads
@property
def lowerCamelCase__ ( self : Union[str, Any] ):
return self.d_model
def lowerCamelCase__ ( self : List[str] ):
__lowerCamelCase : Dict = copy.deepcopy(self.__dict__ )
__lowerCamelCase : Dict = self.backbone_config.to_dict()
__lowerCamelCase : str = self.__class__.model_type
return output | 64 | 0 |
import argparse
import struct
import unittest
class lowerCamelCase__ :
def __init__(self , UpperCAmelCase ) -> None:
_lowercase =data
# Initialize hash values
_lowercase =[
0X6_a_0_9_e_6_6_7,
0Xb_b_6_7_a_e_8_5,
0X3_c_6_e_f_3_7_2,
0Xa_5_4_f_f_5_3_a,
0X5_1_0_e_5_2_7_f,
0X9_b_0_5_6_8_8_c,
0X1_f_8_3_d_9_a_b,
0X5_b_e_0_c_d_1_9,
]
# Initialize round constants
_lowercase =[
0X4_2_8_a_2_f_9_8,
0X7_1_3_7_4_4_9_1,
0Xb_5_c_0_f_b_c_f,
0Xe_9_b_5_d_b_a_5,
0X3_9_5_6_c_2_5_b,
0X5_9_f_1_1_1_f_1,
0X9_2_3_f_8_2_a_4,
0Xa_b_1_c_5_e_d_5,
0Xd_8_0_7_a_a_9_8,
0X1_2_8_3_5_b_0_1,
0X2_4_3_1_8_5_b_e,
0X5_5_0_c_7_d_c_3,
0X7_2_b_e_5_d_7_4,
0X8_0_d_e_b_1_f_e,
0X9_b_d_c_0_6_a_7,
0Xc_1_9_b_f_1_7_4,
0Xe_4_9_b_6_9_c_1,
0Xe_f_b_e_4_7_8_6,
0X0_f_c_1_9_d_c_6,
0X2_4_0_c_a_1_c_c,
0X2_d_e_9_2_c_6_f,
0X4_a_7_4_8_4_a_a,
0X5_c_b_0_a_9_d_c,
0X7_6_f_9_8_8_d_a,
0X9_8_3_e_5_1_5_2,
0Xa_8_3_1_c_6_6_d,
0Xb_0_0_3_2_7_c_8,
0Xb_f_5_9_7_f_c_7,
0Xc_6_e_0_0_b_f_3,
0Xd_5_a_7_9_1_4_7,
0X0_6_c_a_6_3_5_1,
0X1_4_2_9_2_9_6_7,
0X2_7_b_7_0_a_8_5,
0X2_e_1_b_2_1_3_8,
0X4_d_2_c_6_d_f_c,
0X5_3_3_8_0_d_1_3,
0X6_5_0_a_7_3_5_4,
0X7_6_6_a_0_a_b_b,
0X8_1_c_2_c_9_2_e,
0X9_2_7_2_2_c_8_5,
0Xa_2_b_f_e_8_a_1,
0Xa_8_1_a_6_6_4_b,
0Xc_2_4_b_8_b_7_0,
0Xc_7_6_c_5_1_a_3,
0Xd_1_9_2_e_8_1_9,
0Xd_6_9_9_0_6_2_4,
0Xf_4_0_e_3_5_8_5,
0X1_0_6_a_a_0_7_0,
0X1_9_a_4_c_1_1_6,
0X1_e_3_7_6_c_0_8,
0X2_7_4_8_7_7_4_c,
0X3_4_b_0_b_c_b_5,
0X3_9_1_c_0_c_b_3,
0X4_e_d_8_a_a_4_a,
0X5_b_9_c_c_a_4_f,
0X6_8_2_e_6_f_f_3,
0X7_4_8_f_8_2_e_e,
0X7_8_a_5_6_3_6_f,
0X8_4_c_8_7_8_1_4,
0X8_c_c_7_0_2_0_8,
0X9_0_b_e_f_f_f_a,
0Xa_4_5_0_6_c_e_b,
0Xb_e_f_9_a_3_f_7,
0Xc_6_7_1_7_8_f_2,
]
_lowercase =self.preprocessing(self.data )
self.final_hash()
@staticmethod
def __A (UpperCAmelCase ) -> bytes:
_lowercase =B'''\x80''' + (B'''\x00''' * (6_3 - (len(UpperCAmelCase ) + 8) % 6_4))
_lowercase =struct.pack('''>Q''' , (len(UpperCAmelCase ) * 8) )
return data + padding + big_endian_integer
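# Worked example: a 3-byte message gets 0x80, then 52 zero bytes, then the 8-byte
# big-endian bit length (24), for exactly one 64-byte block.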
def __A (self ) -> None:
# Convert into blocks of 64 bytes
_lowercase =[
self.preprocessed_data[x : x + 6_4]
for x in range(0 , len(self.preprocessed_data ) , 6_4 )
]
for block in self.blocks:
# Unpack the 64-byte block into sixteen 32-bit big-endian words
_lowercase =list(struct.unpack('''>16L''' , UpperCAmelCase ) )
# extend with 48 zero words to complete the 64-word message schedule
words += [0] * 4_8
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase =self.hashes
for index in range(0 , 6_4 ):
if index > 1_5:
# fill in the zeroed schedule words at the end of the array from earlier words
_lowercase =(
self.ror(words[index - 1_5] , 7 )
^ self.ror(words[index - 1_5] , 1_8 )
^ (words[index - 1_5] >> 3)
)
_lowercase =(
self.ror(words[index - 2] , 1_7 )
^ self.ror(words[index - 2] , 1_9 )
^ (words[index - 2] >> 1_0)
)
_lowercase =(
words[index - 1_6] + sa + words[index - 7] + sa
) % 0X1_0_0_0_0_0_0_0_0
# Compression
_lowercase =self.ror(UpperCAmelCase , 6 ) ^ self.ror(UpperCAmelCase , 1_1 ) ^ self.ror(UpperCAmelCase , 2_5 )
_lowercase =(e & f) ^ ((~e & 0Xf_f_f_f_f_f_f_f) & g)
_lowercase =(
h + sa + ch + self.round_constants[index] + words[index]
) % 0X1_0_0_0_0_0_0_0_0
_lowercase =self.ror(UpperCAmelCase , 2 ) ^ self.ror(UpperCAmelCase , 1_3 ) ^ self.ror(UpperCAmelCase , 2_2 )
_lowercase =(a & b) ^ (a & c) ^ (b & c)
_lowercase =(sa + maj) % 0X1_0_0_0_0_0_0_0_0
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase =(
g,
f,
e,
((d + tempa) % 0X1_0_0_0_0_0_0_0_0),
c,
b,
a,
((tempa + tempa) % 0X1_0_0_0_0_0_0_0_0),
)
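# Per FIPS 180-4, the rotation above sets e' = (d + temp1) mod 2**32 and
# a' = (temp1 + temp2) mod 2**32, where temp1 and temp2 are the two sums computed just before it.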
_lowercase =[a, b, c, d, e, f, g, h]
# Modify final values
_lowercase =[
((element + mutated_hash_values[index]) % 0X1_0_0_0_0_0_0_0_0)
for index, element in enumerate(self.hashes )
]
_lowercase =''''''.join([hex(UpperCAmelCase )[2:].zfill(8 ) for value in self.hashes] )
def __A (self , UpperCAmelCase , UpperCAmelCase ) -> int:
return 0Xf_f_f_f_f_f_f_f & (value << (3_2 - rotations)) | (value >> rotations)
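# 32-bit right rotation, e.g. ror(0x00000001, 1) -> 0x80000000.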
class lowerCamelCase__ ( unittest.TestCase):
def __A (self ) -> None:
import hashlib
_lowercase =bytes('''Test String''' , '''utf-8''' )
self.assertEqual(SHAaaa(UpperCAmelCase ).hash , hashlib.shaaaa(UpperCAmelCase ).hexdigest() )
def UpperCAmelCase_ ( ) -> None:
"""simple docstring"""
import doctest
doctest.testmod()
_lowercase =argparse.ArgumentParser()
parser.add_argument(
'''-s''' , '''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , )
parser.add_argument(
'''-f''' , '''--file''' , dest='''input_file''' , help='''Hash contents of a file''' )
_lowercase =parser.parse_args()
_lowercase =args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , '''rb''' ) as f:
_lowercase =f.read()
else:
_lowercase =bytes(__snake_case , '''utf-8''' )
print(SHAaaa(__snake_case ).hash )
if __name__ == "__main__":
main()
| 5 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase__ = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 5 | 1 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def A_( A : int):
random.seed(A)
np.random.seed(A)
torch.manual_seed(A)
torch.cuda.manual_seed_all(A)
# ^^ safe to call this function even if cuda is not available
class SCREAMING_SNAKE_CASE__ :
def __init__( self , A_ , A_ = 0.9_999 , A_ = 0.0 , A_ = 0 , A_ = False , A_ = 1.0 , A_ = 2 / 3 , A_ = None , A_ = None , **A_ , )-> List[str]:
'''simple docstring'''
if isinstance(A_ , torch.nn.Module ):
UpperCamelCase = (
'Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '
'Please pass the parameters of the module instead.'
)
deprecate(
'passing a `torch.nn.Module` to `ExponentialMovingAverage`' , '1.0.0' , A_ , standard_warn=A_ , )
UpperCamelCase = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
UpperCamelCase = True
if kwargs.get('max_value' , A_ ) is not None:
UpperCamelCase = 'The `max_value` argument is deprecated. Please use `decay` instead.'
deprecate('max_value' , '1.0.0' , A_ , standard_warn=A_ )
UpperCamelCase = kwargs['max_value']
if kwargs.get('min_value' , A_ ) is not None:
UpperCamelCase = 'The `min_value` argument is deprecated. Please use `min_decay` instead.'
deprecate('min_value' , '1.0.0' , A_ , standard_warn=A_ )
UpperCamelCase = kwargs['min_value']
UpperCamelCase = list(A_ )
UpperCamelCase = [p.clone().detach() for p in parameters]
if kwargs.get('device' , A_ ) is not None:
UpperCamelCase = 'The `device` argument is deprecated. Please use `to` instead.'
deprecate('device' , '1.0.0' , A_ , standard_warn=A_ )
self.to(device=kwargs['device'] )
UpperCamelCase = None
UpperCamelCase = decay
UpperCamelCase = min_decay
UpperCamelCase = update_after_step
UpperCamelCase = use_ema_warmup
UpperCamelCase = inv_gamma
UpperCamelCase = power
UpperCamelCase = 0
UpperCamelCase = None # set in `step()`
UpperCamelCase = model_cls
UpperCamelCase = model_config
@classmethod
def UpperCAmelCase_ ( cls , A_ , A_ )-> "EMAModel":
'''simple docstring'''
UpperCamelCase , UpperCamelCase = model_cls.load_config(A_ , return_unused_kwargs=A_ )
UpperCamelCase = model_cls.from_pretrained(A_ )
UpperCamelCase = cls(model.parameters() , model_cls=A_ , model_config=model.config )
ema_model.load_state_dict(A_ )
return ema_model
def UpperCAmelCase_ ( self , A_ )-> Any:
'''simple docstring'''
if self.model_cls is None:
raise ValueError('`save_pretrained` can only be used if `model_cls` was defined at __init__.' )
if self.model_config is None:
raise ValueError('`save_pretrained` can only be used if `model_config` was defined at __init__.' )
UpperCamelCase = self.model_cls.from_config(self.model_config )
UpperCamelCase = self.state_dict()
state_dict.pop('shadow_params' , A_ )
model.register_to_config(**A_ )
self.copy_to(model.parameters() )
model.save_pretrained(A_ )
def UpperCAmelCase_ ( self , A_ )-> float:
'''simple docstring'''
UpperCamelCase = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
UpperCamelCase = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
UpperCamelCase = (1 + step) / (10 + step)
UpperCamelCase = min(A_ , self.decay )
# make sure decay is not smaller than min_decay
UpperCamelCase = max(A_ , self.min_decay )
return cur_decay_value
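# e.g. with the defaults (use_ema_warmup=False): step=1 -> 2/11 ~= 0.182,
# step=1000 -> 1001/1010 ~= 0.991, and min(..., self.decay) caps the result at 0.9999.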
@torch.no_grad()
def UpperCAmelCase_ ( self , A_ )-> int:
'''simple docstring'''
if isinstance(A_ , torch.nn.Module ):
UpperCamelCase = (
'Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '
'Please pass the parameters of the module instead.'
)
deprecate(
'passing a `torch.nn.Module` to `ExponentialMovingAverage.step`' , '1.0.0' , A_ , standard_warn=A_ , )
UpperCamelCase = parameters.parameters()
UpperCamelCase = list(A_ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
UpperCamelCase = self.get_decay(self.optimization_step )
UpperCamelCase = decay
UpperCamelCase = 1 - decay
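# EMA update: s <- decay * s + (1 - decay) * p, applied below in-place as s -= (1 - decay) * (s - p).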
UpperCamelCase = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , A_ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
UpperCamelCase = deepspeed.zero.GatheredParameters(A_ , modifier_rank=A_ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(A_ )
def UpperCAmelCase_ ( self , A_ )-> None:
'''simple docstring'''
UpperCamelCase = list(A_ )
for s_param, param in zip(self.shadow_params , A_ ):
param.data.copy_(s_param.to(param.device ).data )
def UpperCAmelCase_ ( self , A_=None , A_=None )-> None:
'''simple docstring'''
UpperCamelCase = [
p.to(device=A_ , dtype=A_ ) if p.is_floating_point() else p.to(device=A_ )
for p in self.shadow_params
]
def UpperCAmelCase_ ( self )-> dict:
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def UpperCAmelCase_ ( self , A_ )-> None:
'''simple docstring'''
UpperCamelCase = [param.detach().cpu().clone() for param in parameters]
def UpperCAmelCase_ ( self , A_ )-> None:
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError('This ExponentialMovingAverage has no `store()`ed weights ' 'to `restore()`' )
for c_param, param in zip(self.temp_stored_params , A_ ):
param.data.copy_(c_param.data )
# Better memory-wise.
UpperCamelCase = None
def UpperCAmelCase_ ( self , A_ )-> None:
'''simple docstring'''
UpperCamelCase = copy.deepcopy(A_ )
UpperCamelCase = state_dict.get('decay' , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('Decay must be between 0 and 1' )
UpperCamelCase = state_dict.get('min_decay' , self.min_decay )
if not isinstance(self.min_decay , A_ ):
raise ValueError('Invalid min_decay' )
UpperCamelCase = state_dict.get('optimization_step' , self.optimization_step )
if not isinstance(self.optimization_step , A_ ):
raise ValueError('Invalid optimization_step' )
UpperCamelCase = state_dict.get('update_after_step' , self.update_after_step )
if not isinstance(self.update_after_step , A_ ):
raise ValueError('Invalid update_after_step' )
UpperCamelCase = state_dict.get('use_ema_warmup' , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , A_ ):
raise ValueError('Invalid use_ema_warmup' )
UpperCamelCase = state_dict.get('inv_gamma' , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError('Invalid inv_gamma' )
UpperCamelCase = state_dict.get('power' , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError('Invalid power' )
UpperCamelCase = state_dict.get('shadow_params' , A_ )
if shadow_params is not None:
UpperCamelCase = shadow_params
if not isinstance(self.shadow_params , A_ ):
raise ValueError('shadow_params must be a list' )
if not all(isinstance(A_ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError('shadow_params must all be Tensors' )
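# Minimal usage sketch (variable names are illustrative):
# ema = EMAModel(model.parameters(), decay=0.9999)
# for batch in loader:
#     loss.backward(); optimizer.step(); optimizer.zero_grad()
#     ema.step(model.parameters())       # update the shadow weights
# ema.copy_to(model.parameters())        # swap in EMA weights for evaluation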
| 251 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class SCREAMING_SNAKE_CASE__ :
lowerCAmelCase_ = BlenderbotConfig
lowerCAmelCase_ = {}
lowerCAmelCase_ = """gelu"""
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=False , A_=99 , A_=32 , A_=2 , A_=4 , A_=37 , A_=0.1 , A_=0.1 , A_=20 , A_=2 , A_=1 , A_=0 , )-> List[Any]:
'''simple docstring'''
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = eos_token_id
UpperCamelCase = pad_token_id
UpperCamelCase = bos_token_id
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCamelCase = prepare_blenderbot_inputs_dict(A_ , A_ , A_ )
return config, inputs_dict
def UpperCAmelCase_ ( self , A_ , A_ )-> int:
'''simple docstring'''
UpperCamelCase = TFBlenderbotModel(config=A_ ).get_decoder()
UpperCamelCase = inputs_dict['input_ids']
UpperCamelCase = input_ids[:1, :]
UpperCamelCase = inputs_dict['attention_mask'][:1, :]
UpperCamelCase = inputs_dict['head_mask']
UpperCamelCase = 1
# first forward pass
UpperCamelCase = model(A_ , attention_mask=A_ , head_mask=A_ , use_cache=A_ )
UpperCamelCase , UpperCamelCase = outputs.to_tuple()
# create hypothetical next tokens and extend to next_input_ids
UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append the new tokens to input_ids and attention_mask
UpperCamelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCamelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCamelCase = model(A_ , attention_mask=A_ )[0]
UpperCamelCase = model(A_ , attention_mask=A_ , past_key_values=A_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCamelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx]
UpperCamelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(A_ , A_ , rtol=1e-3 )
def A_( A : List[Any] , A : Tuple , A : Optional[Any] , A : List[str]=None , A : str=None , A : List[Any]=None , A : Dict=None , A : Any=None , ):
if attention_mask is None:
UpperCamelCase = tf.cast(tf.math.not_equal(A , config.pad_token_id) , tf.inta)
if decoder_attention_mask is None:
UpperCamelCase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id) , tf.inta),
] , axis=-1 , )
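# The leading ones column keeps the decoder start position attended even when that token equals pad_token_id.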
if head_mask is None:
UpperCamelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads))
if decoder_head_mask is None:
UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads))
if cross_attn_head_mask is None:
UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_ , unittest.TestCase):
lowerCAmelCase_ = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
lowerCAmelCase_ = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
lowerCAmelCase_ = (
{
"""conversational""": TFBlenderbotForConditionalGeneration,
"""feature-extraction""": TFBlenderbotModel,
"""summarization""": TFBlenderbotForConditionalGeneration,
"""text2text-generation""": TFBlenderbotForConditionalGeneration,
"""translation""": TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowerCAmelCase_ = True
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def UpperCAmelCase_ ( self )-> List[Any]:
'''simple docstring'''
UpperCamelCase = TFBlenderbotModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=A_ )
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self )-> List[Any]:
'''simple docstring'''
UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*A_ )
@require_tokenizers
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase):
lowerCAmelCase_ = ["""My friends are cool but they eat too many carbs."""]
lowerCAmelCase_ = """facebook/blenderbot-400M-distill"""
@cached_property
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = self.tokenizer(self.src_text , return_tensors='tf' )
UpperCamelCase = self.model.generate(
model_inputs.input_ids , )
UpperCamelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=A_ )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 251 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a : Optional[Any] = {
"configuration_squeezebert": [
"SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SqueezeBertConfig",
"SqueezeBertOnnxConfig",
],
"tokenization_squeezebert": ["SqueezeBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[int] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = [
"SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"SqueezeBertForMaskedLM",
"SqueezeBertForMultipleChoice",
"SqueezeBertForQuestionAnswering",
"SqueezeBertForSequenceClassification",
"SqueezeBertForTokenClassification",
"SqueezeBertModel",
"SqueezeBertModule",
"SqueezeBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
a : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 114 | '''simple docstring'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : Dict ="M-CLIP"
def __init__( self : Tuple , a : Optional[int]=10_24 , a : Tuple=7_68 , **a : List[str] ):
"""simple docstring"""
__lowerCamelCase = transformerDimSize
__lowerCamelCase = imageDimSize
super().__init__(**a )
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : Optional[Any] =MCLIPConfig
def __init__( self : str , a : List[Any] , *a : Dict , **a : str ):
"""simple docstring"""
super().__init__(a , *a , **a )
__lowerCamelCase = XLMRobertaModel(a )
__lowerCamelCase = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , a : int , a : List[Any] ):
"""simple docstring"""
__lowerCamelCase = self.transformer(input_ids=a , attention_mask=a )[0]
__lowerCamelCase = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
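# Masked mean pooling: zero out padded positions, then average each sequence over its real tokens only.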
return self.LinearTransformation(a ), embs
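# Hypothetical usage sketch (class and model names illustrative):
# model = MultilingualCLIP.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
# txt_emb, token_embs = model(batch["input_ids"], batch["attention_mask"])
# txt_emb is the pooled, linearly projected sentence embedding in the CLIP image space.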
| 67 | 0 |
'''simple docstring'''
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class __A :
a__ : Union[str, Any] = None
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase_ = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , __a )
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = os.path.join(__a , "feat_extract.json" )
feat_extract_first.to_json_file(__a )
UpperCAmelCase_ = self.feature_extraction_class.from_json_file(__a )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
    def test_feat_extract_from_and_save_pretrained (self : int ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
    def test_init_without_params (self : Union[str, Any] ):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract )
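# --- Editor-added example (hedged; the extractor and kwargs are illustrative) ---
# The mixin above expects subclasses to provide `feature_extraction_class` and
# `feat_extract_dict`. A minimal concrete wiring, assuming `transformers` is installed:
import unittest

from transformers import Wav2Vec2FeatureExtractor


class Wav2Vec2SaveLoadTest(__A, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor
    feat_extract_dict = {"feature_size": 1, "sampling_rate": 16000, "padding_value": 0.0}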
| 106 | '''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE_: List[str] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Tuple ={
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class __A ( BackboneConfigMixin , PretrainedConfig ):
    model_type = """resnet"""
    layer_types = ["""basic""", """bottleneck"""]
    def __init__(self : List[Any] , num_channels : Any=3 , embedding_size : Dict=64 , hidden_sizes : Union[str, Any]=[256, 512, 1024, 2048] , depths : str=[3, 4, 6, 3] , layer_type : Optional[Any]="bottleneck" , hidden_act : Tuple="relu" , downsample_in_first_stage : int=False , out_features : Optional[int]=None , out_indices : str=None , **kwargs : Dict , ):
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(f"""layer_type={layer_type} is not one of {",".join(self.layer_types )}""" )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
class __A ( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("""1.11""" )
@property
    def inputs (self : Optional[int] ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
    def atol_for_validation (self : str ):
return 1E-3
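# --- Editor-added usage example (hedged) ---
# Obfuscation gave both classes above the same name `__A`, so the ONNX config shadows
# the model config in this snippet; upstream they are `ResNetConfig` and
# `ResNetOnnxConfig`. Typical backbone-style instantiation:
if __name__ == "__main__":
    from transformers import ResNetConfig  # upstream name of the first class above

    _cfg = ResNetConfig(depths=[3, 4, 6, 3], layer_type="bottleneck",
                        out_features=["stage2", "stage4"])
    assert _cfg.out_features == ["stage2", "stage4"]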
| 106 | 1 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0)
        lowerCamelCase__: Union[str, Any] =UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase_ , )
lowerCamelCase__: List[str] =DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
lowerCamelCase__: Optional[Any] =DDIMInverseScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCAmelCase_ , set_alpha_to_zero=UpperCAmelCase_ , )
torch.manual_seed(0)
lowerCamelCase__: Optional[int] =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0)
lowerCamelCase__: Dict =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=512 , )
lowerCamelCase__: Dict =CLIPTextModel(UpperCAmelCase_)
lowerCamelCase__: List[Any] =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
lowerCamelCase__: str ={
"unet": unet,
"scheduler": scheduler,
"inverse_scheduler": inverse_scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=0) ->Any:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =floats_tensor((1, 16, 16) , rng=random.Random(UpperCAmelCase_)).to(UpperCAmelCase_)
lowerCamelCase__: Tuple =floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(UpperCAmelCase_)).to(UpperCAmelCase_)
if str(UpperCAmelCase_).startswith("mps"):
lowerCamelCase__: List[Any] =torch.manual_seed(UpperCAmelCase_)
else:
lowerCamelCase__: Optional[int] =torch.Generator(device=UpperCAmelCase_).manual_seed(UpperCAmelCase_)
lowerCamelCase__: List[Any] ={
"prompt": "a dog and a newt",
"mask_image": mask,
"image_latents": latents,
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict=0) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: List[str] =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_)).to(UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =image.cpu().permute(0 , 2 , 3 , 1)[0]
        lowerCamelCase__: Optional[int] =Image.fromarray(np.uint8(UpperCAmelCase_)).convert("RGB")
if str(UpperCAmelCase_).startswith("mps"):
lowerCamelCase__: Union[str, Any] =torch.manual_seed(UpperCAmelCase_)
else:
lowerCamelCase__: Union[str, Any] =torch.Generator(device=UpperCAmelCase_).manual_seed(UpperCAmelCase_)
lowerCamelCase__: List[str] ={
"image": image,
"source_prompt": "a cat and a frog",
"target_prompt": "a dog and a newt",
"generator": generator,
"num_inference_steps": 2,
"num_maps_per_mask": 2,
"mask_encode_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any]=0) ->str:
'''simple docstring'''
lowerCamelCase__: Tuple =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_)).to(UpperCAmelCase_)
lowerCamelCase__: str =image.cpu().permute(0 , 2 , 3 , 1)[0]
        lowerCamelCase__: Any =Image.fromarray(np.uint8(UpperCAmelCase_)).convert("RGB")
if str(UpperCAmelCase_).startswith("mps"):
lowerCamelCase__: Dict =torch.manual_seed(UpperCAmelCase_)
else:
lowerCamelCase__: int =torch.Generator(device=UpperCAmelCase_).manual_seed(UpperCAmelCase_)
lowerCamelCase__: Dict ={
"image": image,
"prompt": "a cat and a frog",
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"decode_latents": True,
"output_type": "numpy",
}
return inputs
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[Any]:
'''simple docstring'''
if not hasattr(self.pipeline_class , "_optional_components"):
return
lowerCamelCase__: Dict =self.get_dummy_components()
lowerCamelCase__: Optional[Any] =self.pipeline_class(**UpperCAmelCase_)
pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})
lowerCamelCase__: Union[str, Any] =self.get_dummy_inputs(UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =pipe(**UpperCAmelCase_)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCAmelCase_)
lowerCamelCase__: int =self.pipeline_class.from_pretrained(UpperCAmelCase_)
pipe_loaded.to(UpperCAmelCase_)
pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase_)
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCAmelCase_ , UpperCAmelCase_) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
lowerCamelCase__: Optional[int] =self.get_dummy_inputs(UpperCAmelCase_)
lowerCamelCase__: int =pipe_loaded(**UpperCAmelCase_)[0]
lowerCamelCase__: int =np.abs(output - output_loaded).max()
self.assertLess(UpperCAmelCase_ , 1E-4)
def SCREAMING_SNAKE_CASE_ (self : Dict) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: List[str] ="cpu"
lowerCamelCase__: List[Any] =self.get_dummy_components()
lowerCamelCase__: int =self.pipeline_class(**UpperCAmelCase_)
pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
lowerCamelCase__: Optional[int] =self.get_dummy_mask_inputs(UpperCAmelCase_)
lowerCamelCase__: Dict =pipe.generate_mask(**UpperCAmelCase_)
lowerCamelCase__: List[Any] =mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16))
lowerCamelCase__: int =np.array([0] * 9)
lowerCamelCase__: Tuple =np.abs(mask_slice.flatten() - expected_slice).max()
self.assertLessEqual(UpperCAmelCase_ , 1E-3)
self.assertEqual(mask[0, -3, -4] , 0)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: str ="cpu"
lowerCamelCase__: Optional[Any] =self.get_dummy_components()
lowerCamelCase__: Optional[Any] =self.pipeline_class(**UpperCAmelCase_)
pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
lowerCamelCase__: Tuple =self.get_dummy_inversion_inputs(UpperCAmelCase_)
lowerCamelCase__: List[Any] =pipe.invert(**UpperCAmelCase_).images
lowerCamelCase__: Union[str, Any] =image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3))
lowerCamelCase__: int =np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5_1050, 0.5015, 0.4407, 0.4799] , )
lowerCamelCase__: Optional[int] =np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(UpperCAmelCase_ , 1E-3)
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5E-3)
def SCREAMING_SNAKE_CASE_ (self : str) ->Any:
'''simple docstring'''
lowerCamelCase__: Any ="cpu"
lowerCamelCase__: Any =self.get_dummy_components()
lowerCamelCase__: List[Any] ={"beta_start": 0.0_0085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
lowerCamelCase__: List[Any] =DPMSolverMultistepScheduler(**UpperCAmelCase_)
lowerCamelCase__: Any =DPMSolverMultistepInverseScheduler(**UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =self.pipeline_class(**UpperCAmelCase_)
pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
lowerCamelCase__: Dict =self.get_dummy_inversion_inputs(UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =pipe.invert(**UpperCAmelCase_).images
lowerCamelCase__: List[Any] =image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3))
lowerCamelCase__: Optional[int] =np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5_1050, 0.5015, 0.4407, 0.4799] , )
lowerCamelCase__: int =np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(UpperCAmelCase_ , 1E-3)
@require_torch_gpu
@slow
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : Any) ->Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : Optional[int]) ->str:
'''simple docstring'''
lowerCamelCase__: Tuple =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png")
lowerCamelCase__: Optional[int] =raw_image.convert("RGB").resize((768, 768))
lowerCamelCase__: Any =raw_image
def SCREAMING_SNAKE_CASE_ (self : Any) ->Any:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =torch.manual_seed(0)
        lowerCamelCase__: List[str] =StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1" , safety_checker=None , torch_dtype=torch.float16)
lowerCamelCase__: int =DDIMScheduler.from_config(pipe.scheduler.config)
lowerCamelCase__: int =DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
lowerCamelCase__: Optional[Any] ="a bowl of fruit"
lowerCamelCase__: List[str] ="a bowl of pears"
lowerCamelCase__: Tuple =pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCAmelCase_ , target_prompt=UpperCAmelCase_ , generator=UpperCAmelCase_ , )
lowerCamelCase__: Optional[Any] =pipe.invert(
prompt=UpperCAmelCase_ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCAmelCase_).latents
lowerCamelCase__: Optional[int] =pipe(
prompt=UpperCAmelCase_ , mask_image=UpperCAmelCase_ , image_latents=UpperCAmelCase_ , generator=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , inpaint_strength=0.7 , output_type="numpy" , ).images[0]
lowerCamelCase__: int =(
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png").resize((768, 768)))
/ 255
)
assert np.abs((expected_image - image).max()) < 5E-1
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Any:
'''simple docstring'''
lowerCamelCase__: str =torch.manual_seed(0)
        lowerCamelCase__: Optional[Any] =StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1" , safety_checker=None , torch_dtype=torch.float16)
lowerCamelCase__: Dict =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
lowerCamelCase__: List[str] =DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
lowerCamelCase__: Dict ="a bowl of fruit"
lowerCamelCase__: Optional[Any] ="a bowl of pears"
lowerCamelCase__: int =pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCAmelCase_ , target_prompt=UpperCAmelCase_ , generator=UpperCAmelCase_ , )
lowerCamelCase__: List[str] =pipe.invert(
prompt=UpperCAmelCase_ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCAmelCase_ , num_inference_steps=25 , ).latents
lowerCamelCase__: List[Any] =pipe(
prompt=UpperCAmelCase_ , mask_image=UpperCAmelCase_ , image_latents=UpperCAmelCase_ , generator=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , inpaint_strength=0.7 , num_inference_steps=25 , output_type="numpy" , ).images[0]
lowerCamelCase__: List[Any] =(
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png").resize((768, 768)))
/ 255
)
assert np.abs((expected_image - image).max()) < 5E-1
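# --- Editor-added overview sketch (hedged; mirrors the slow tests above, needs a GPU,
# network access, and a real input photo instead of the blank placeholder) ---
if __name__ == "__main__":
    _pipe = StableDiffusionDiffEditPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16)
    _pipe.scheduler = DDIMScheduler.from_config(_pipe.scheduler.config)
    _pipe.inverse_scheduler = DDIMInverseScheduler.from_config(_pipe.scheduler.config)
    _pipe.enable_model_cpu_offload()

    _img = Image.new("RGB", (768, 768))  # placeholder input image
    _mask = _pipe.generate_mask(image=_img, source_prompt="a bowl of fruit",
                                target_prompt="a bowl of pears")
    _latents = _pipe.invert(prompt="a bowl of fruit", image=_img,
                            inpaint_strength=0.7).latents
    _result = _pipe(prompt="a bowl of pears", mask_image=_mask, image_latents=_latents,
                    inpaint_strength=0.7).images[0]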
| 10 |
"""simple docstring"""
def find_minimum_change( denominations , value ):
    total_value = int(value )
    # Initialize result
    answer = []
    # Traverse all denominations, largest first (greedy choice)
    for denomination in reversed(denominations ):
        # Take coins of this denomination while they still fit
        while total_value >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # record the chosen coin
    return answer
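# --- Editor-added worked example (greedy takes the largest coin first) ---
if __name__ == "__main__":
    assert find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987) == [
        500, 100, 100, 100, 100, 50, 20, 10, 5, 2
    ]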
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
        n = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(f"""Denomination {i}: """).strip()))
        value = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
        value = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(f"""Following is minimal change for {value}: """)
        answer = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 173 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta'] = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta_fast'] = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlm_roberta'] = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlm_roberta'] = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_xlm_roberta'] = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 86 |
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
_lowercase : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first
            resulting token will be used (with a warning, and that might be slower).

    """ , )
class _UpperCAmelCase ( Pipeline ):
    def get_masked_index ( self : List[Any] , input_ids : GenericTensor ):
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=False )
        else:
            raise ValueError('''Unsupported framework''' )
        return masked_index
    def _ensure_exactly_one_mask_token ( self : List[str] , input_ids : GenericTensor ):
        masked_index = self.get_masked_index(input_ids )
        numel = np.prod(masked_index.shape )
        if numel < 1:
            raise PipelineException(
                '''fill-mask''' , self.model.base_model_prefix , F'''No mask_token ({self.tokenizer.mask_token}) found on the input''' , )
    def ensure_exactly_one_mask_token ( self : Optional[int] , model_inputs : GenericTensor ):
        if isinstance(model_inputs , list ):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids )
    def preprocess ( self : List[str] , inputs : Optional[int] , return_tensors : Tuple=None , **preprocess_parameters : Tuple ):
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs , return_tensors=return_tensors )
        self.ensure_exactly_one_mask_token(model_inputs )
        return model_inputs
    def _forward ( self : Optional[int] , model_inputs : Tuple ):
        model_outputs = self.model(**model_inputs )
        model_outputs['''input_ids'''] = model_inputs['''input_ids''']
        return model_outputs
    def postprocess ( self : Optional[int] , model_outputs : List[str] , top_k : Optional[Any]=5 , target_ids : Dict=None ):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs['''input_ids'''][0]
        outputs = model_outputs['''logits''']
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits , axis=-1 )
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs , 0 ) , target_ids.reshape(-1 , 1 ) )
                probs = tf.expand_dims(probs , 0 )
            topk = tf.math.top_k(probs , k=top_k )
            values , predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=False ).squeeze(-1 )
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1 )
            if target_ids is not None:
                probs = probs[..., target_ids]
            values , predictions = probs.topk(top_k )
        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
            row = []
            for v, p in zip(_values , _predictions ):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens , skip_special_tokens=single_mask )
                proposition = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
                row.append(proposition )
            result.append(row )
        if single_mask:
            return result[0]
        return result
    def get_target_ids ( self : str , targets : List[Any] , top_k : List[Any]=None ):
        if isinstance(targets , str ):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target , None )
            if id_ is None:
                input_ids = self.tokenizer(
                    target , add_special_tokens=False , return_attention_mask=False , return_token_type_ids=False , max_length=1 , truncation=True , )['''input_ids''']
                if len(input_ids ) == 0:
                    logger.warning(
                        F'''The specified target token `{target}` does not exist in the model vocabulary. '''
                        '''We cannot replace it with anything meaningful, ignoring it''' )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    F'''The specified target token `{target}` does not exist in the model vocabulary. '''
                    F'''Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.''' )
            target_ids.append(id_ )
        target_ids = list(set(target_ids ) )
        if len(target_ids ) == 0:
            raise ValueError('''At least one target must be provided when passed.''' )
        target_ids = np.array(target_ids )
        return target_ids
    def _sanitize_parameters ( self : int , top_k : Dict=None , targets : Optional[Any]=None ):
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets , top_k )
            postprocess_params['''target_ids'''] = target_ids
        if top_k is not None:
            postprocess_params['''top_k'''] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                '''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
        return {}, {}, postprocess_params
    def __call__( self : Union[str, Any] , inputs : Optional[Any] , *args : Union[str, Any] , **kwargs : int ):
        outputs = super().__call__(inputs , **kwargs )
        if isinstance(inputs , list ) and len(inputs ) == 1:
            return outputs[0]
        return outputs
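# --- Editor-added usage example (hedged; the model id is the stock BERT checkpoint
# and the first call downloads weights) ---
if __name__ == "__main__":
    from transformers import pipeline

    _unmasker = pipeline("fill-mask", model="bert-base-uncased")
    print(_unmasker("The capital of France is [MASK].", top_k=3))
    # `targets` restricts scoring to chosen vocab entries (see get_target_ids above):
    print(_unmasker("The capital of France is [MASK].", targets=["paris", "lyon"]))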
| 86 | 1 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
a_ : Tuple = logging.get_logger(__name__)
a_ : str = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
a_ : Tuple = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
a_ : str = {
'jukebox': 5_12,
}
class _snake_case ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , artists_file , genres_file , lyrics_file , version=["v3", "v2", "v2"] , max_n_lyric_tokens=512 , n_genres=5 , unk_token="<|endoftext|>" , **kwargs , ) -> List[Any]:
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False) if isinstance(unk_token , str) else unk_token
        super().__init__(
            unk_token=unk_token , n_genres=n_genres , version=version , max_n_lyric_tokens=max_n_lyric_tokens , **kwargs , )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres
        with open(artists_file , encoding='utf-8') as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)
        with open(genres_file , encoding='utf-8') as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)
        with open(lyrics_file , encoding='utf-8') as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)
        oov = R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
        # In v2 we had n_vocab=80; in v3 the '+' went missing, so n_vocab=79 characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(R'\-\'' , R'\-+\'')
        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
@property
    def vocab_size ( self) -> Any:
return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)
    def get_vocab ( self) -> List[Any]:
        return {"artists_encoder": self.artists_encoder, "genres_encoder": self.genres_encoder, "lyrics_encoder": self.lyrics_encoder}
    def _convert_token_to_id ( self , list_artists , list_genres , list_lyrics) -> Tuple:
        artists_id = [self.artists_encoder.get(artist , 0) for artist in list_artists]
        for genres in range(len(list_genres)):
            list_genres[genres] = [self.genres_encoder.get(genre , 0) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
        lyric_ids = [[self.lyrics_encoder.get(character , 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids
    def _tokenize ( self , lyrics) -> Any:
        return list(lyrics)
    def tokenize ( self , artist , genre , lyrics , **kwargs) -> Optional[int]:
        artist , genre , lyrics = self.prepare_for_tokenization(artist , genre , lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics
    def prepare_for_tokenization ( self , artists , genres , lyrics , is_split_into_words = False) -> Tuple[str, str, str, Dict[str, Any]]:
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + '.v2'
                genres[idx] = [
                    self._normalize(genre) + '.v2' for genre in genres[idx].split('_')
                ]  # split is for the full dictionary with combined genres
        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+')
            vocab = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab['<unk>'] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ''
        else:
            self.out_of_vocab = regex.compile(R'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+')
        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace('\\' , '\n')
        lyrics = self.out_of_vocab.sub('' , lyrics), [], []
        return artists, genres, lyrics
    def _run_strip_accents ( self , text) -> Dict:
        text = unicodedata.normalize('NFD' , text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)
    def _normalize ( self , text) -> str:
        accepted = (
            [chr(i) for i in range(ord('a') , ord('z') + 1)]
            + [chr(i) for i in range(ord('A') , ord('Z') + 1)]
            + [chr(i) for i in range(ord('0') , ord('9') + 1)]
            + ['.']
        )
        accepted = frozenset(accepted)
        pattern = re.compile(R'_+')
        text = ''.join([c if c in accepted else '_' for c in text.lower()])
        text = pattern.sub('_' , text).strip('_')
        return text
    def convert_lyric_tokens_to_string ( self , lyrics) -> str:
        return " ".join(lyrics)
    def convert_to_tensors ( self , inputs , tensor_type = None , prepend_batch_axis = False) -> Any:
        # Convert to TensorType
        if not isinstance(tensor_type , TensorType):
            tensor_type = TensorType(tensor_type)
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.')
            import tensorflow as tf
            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.')
            import torch
            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.')
            import jax.numpy as jnp  # noqa: F811
            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]
            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                'Unable to create tensor, you should probably activate truncation and/or padding '
                'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.')
        return inputs
    def __call__( self , artist , genres , lyrics="" , return_tensors="pt") -> BatchEncoding:
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)
        artists_tokens , genres_tokens , lyrics_tokens = self.tokenize(artist , genres , lyrics)
        artists_id , genres_ids , full_tokens = self._convert_token_to_id(artists_tokens , genres_tokens , lyrics_tokens)
        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=return_tensors)
            for i in range(len(self.version))
        ]
        return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks})
    def save_vocabulary ( self , save_directory , filename_prefix = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        artists_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'])
        with open(artists_file , 'w' , encoding='utf-8') as f:
            f.write(json.dumps(self.artists_encoder , ensure_ascii=False))
        genres_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'])
        with open(genres_file , 'w' , encoding='utf-8') as f:
            f.write(json.dumps(self.genres_encoder , ensure_ascii=False))
        lyrics_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'])
        with open(lyrics_file , 'w' , encoding='utf-8') as f:
            f.write(json.dumps(self.lyrics_encoder , ensure_ascii=False))
        return (artists_file, genres_file, lyrics_file)
    def _convert_id_to_token ( self , artists_index , genres_index , lyric_index) -> int:
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
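# --- Editor-added usage sketch (hedged; the hub id is an assumption and the call
# needs network access) ---
if __name__ == "__main__":
    from transformers import JukeboxTokenizer  # upstream name of the class above

    _tok = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
    _enc = _tok("Alan Jackson", "Country Rock", "old town road")
    print(len(_enc["input_ids"]))  # one packed tensor per entry in _tok.version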
| 137 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('env')
    else:
        parser = argparse.ArgumentParser('Accelerate env command')
    parser.add_argument(
        '--config_file' , default=None , help='The config file to use for the default values in the launching script.')
    if subparsers is not None:
        parser.set_defaults(func=env_command)
return parser
def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = 'Not found'
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()
    info = {
'`Accelerate` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'Numpy version': np.__version__,
'PyTorch version (GPU?)': F'''{pt_version} ({pt_cuda_available})''',
        'PyTorch XPU available': str(pt_xpu_available),
        'PyTorch NPU available': str(pt_npu_available),
'System RAM': F'''{psutil.virtual_memory().total / 1024 ** 3:.2f} GB''',
}
    if pt_cuda_available:
        info['GPU type'] = torch.cuda.get_device_name()
print('\nCopy-and-paste the text below in your GitHub issue\n')
print('\n'.join([F'''- {prop}: {val}''' for prop, val in info.items()]))
print('- `Accelerate` default config:' if args.config_file is None else '- `Accelerate` config passed:')
    accelerate_config_str = (
        '\n'.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config , dict)
        else F'''\t{accelerate_config}'''
    )
    print(accelerate_config_str)
    info['`Accelerate` configs'] = accelerate_config
return info
def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
return 0
if __name__ == "__main__":
raise SystemExit(main())
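# --- Editor-added usage note (hedged) ---
# This module backs the `accelerate env` subcommand, e.g. from a shell:
#     accelerate env --config_file ~/.cache/huggingface/accelerate/default_config.yaml
# Programmatic equivalent: env_command(env_command_parser().parse_args([]))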
| 137 | 1 |
"""simple docstring"""
from manim import *
class _UpperCAmelCase( Scene ):
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = Rectangle(height=0.5 , width=0.5)
_UpperCamelCase = Rectangle(height=0.25 , width=0.25)
_UpperCamelCase = Rectangle(height=0.46 , width=0.46).set_stroke(width=0)
_UpperCamelCase = [mem.copy() for i in range(6)]
_UpperCamelCase = [mem.copy() for i in range(6)]
_UpperCamelCase = VGroup(*__a).arrange(__a , buff=0)
_UpperCamelCase = VGroup(*__a).arrange(__a , buff=0)
_UpperCamelCase = VGroup(__a , __a).arrange(__a , buff=0)
_UpperCamelCase = Text('''CPU''' , font_size=24)
_UpperCamelCase = Group(__a , __a).arrange(__a , buff=0.5 , aligned_edge=__a)
cpu.move_to([-2.5, -0.5, 0])
self.add(__a)
_UpperCamelCase = [mem.copy() for i in range(4)]
_UpperCamelCase = VGroup(*__a).arrange(__a , buff=0)
_UpperCamelCase = Text('''GPU''' , font_size=24)
_UpperCamelCase = Group(__a , __a).arrange(__a , buff=0.5 , aligned_edge=__a)
gpu.move_to([-1, -1, 0])
self.add(__a)
_UpperCamelCase = [mem.copy() for i in range(6)]
_UpperCamelCase = VGroup(*__a).arrange(__a , buff=0)
_UpperCamelCase = Text('''Model''' , font_size=24)
_UpperCamelCase = Group(__a , __a).arrange(__a , buff=0.5 , aligned_edge=__a)
model.move_to([3, -1.0, 0])
self.add(__a)
_UpperCamelCase = []
_UpperCamelCase = []
_UpperCamelCase = []
for i, rect in enumerate(__a):
rect.set_stroke(__a)
_UpperCamelCase = Rectangle(height=0.46 / 4 , width=0.46 / 3).set_stroke(width=0.0).set_fill(__a , opacity=0.7)
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.02 , direction=__a)
cpu_target.set_x(cpu_target.get_x() + 0.1)
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__a , buff=0.0)
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__a , buff=0.0)
self.add(__a)
model_cpu_arr.append(__a)
self.add(*__a , *__a , *__a)
_UpperCamelCase = [mem.copy() for i in range(6)]
_UpperCamelCase = VGroup(*__a).arrange(__a , buff=0)
_UpperCamelCase = Text('''Loaded Checkpoint''' , font_size=24)
_UpperCamelCase = Group(__a , __a).arrange(__a , buff=0.5 , aligned_edge=__a)
checkpoint.move_to([3, 0.5, 0])
self.add(__a)
_UpperCamelCase = []
_UpperCamelCase = []
for i, rect in enumerate(__a):
_UpperCamelCase = fill.copy().set_fill(__a , opacity=0.7)
target.move_to(__a)
ckpt_arr.append(__a)
_UpperCamelCase = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1])
else:
cpu_target.move_to(cpu_right_col_base[i - 5])
ckpt_cpu_arr.append(__a)
self.add(*__a , *__a)
_UpperCamelCase = Square(side_length=2.2)
key.move_to([-5, 2, 0])
_UpperCamelCase = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0])
self.add(__a , __a)
_UpperCamelCase = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(__a , DOWN * 2.4 , aligned_edge=key_text.get_left())
self.add(__a)
_UpperCamelCase = MarkupText(
F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=24 , )
step_a.move_to([2, 2, 0])
_UpperCamelCase = [meta_mem.copy() for i in range(6)]
_UpperCamelCase = [meta_mem.copy() for i in range(6)]
_UpperCamelCase = VGroup(*__a).arrange(__a , buff=0)
_UpperCamelCase = VGroup(*__a).arrange(__a , buff=0)
_UpperCamelCase = VGroup(__a , __a).arrange(__a , buff=0)
_UpperCamelCase = Text('''Disk''' , font_size=24)
_UpperCamelCase = Group(__a , __a).arrange(__a , buff=0.5 , aligned_edge=__a)
disk.move_to([-4.0, -1.25, 0])
self.play(Write(__a , run_time=3) , Write(__a , run_time=1) , Create(__a , run_time=1))
_UpperCamelCase = []
for i, rect in enumerate(__a):
_UpperCamelCase = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i]).scale(0.5)
animations.append(MoveToTarget(__a , run_time=1.5))
self.play(*__a)
self.play(FadeOut(__a))
_UpperCamelCase = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24)
step_a.move_to([2, 2, 0])
self.play(Write(__a , run_time=3))
self.play(
FadeOut(__a , __a , *__a , *__a) , )
self.wait()
| 100 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _UpperCAmelCase( PipelineTool ):
    description = (
        'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'
        'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
        'describing the elements what should be identified in the segmentation mask. The tool returns the mask.'
    )
    default_checkpoint = 'CIDAS/clipseg-rd64-refined'
    name = 'image_segmenter'
    model_class = CLIPSegForImageSegmentation
    inputs = ['image', 'text']
    outputs = ['image']
    def __init__( self , *args , **kwargs) -> Any:
        '''simple docstring'''
        requires_backends(self , ['''vision'''])
        super().__init__(*args , **kwargs)
    def encode ( self , image , label) -> Dict:
        '''Pack the image/label pair into processor tensors.'''
        return self.pre_processor(text=[label] , images=[image] , padding=True , return_tensors='''pt''')
    def forward ( self , inputs) -> Tuple:
        '''Run CLIPSeg without gradients and return the raw logits.'''
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits
    def decode ( self , outputs) -> Any:
        '''Threshold the logits into a binary mask image.'''
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
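# --- Editor-added usage sketch (hedged; the blank image is a placeholder and the
# first call downloads the CLIPSeg checkpoint) ---
if __name__ == "__main__":
    _tool = _UpperCAmelCase()                # the image-segmentation tool above
    _img = Image.new("RGB", (352, 352))      # replace with a real photo
    _mask = _tool(image=_img, label="cat")   # returns a binary PIL mask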
| 100 | 1 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
UpperCAmelCase : List[Any] = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B'''\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'''
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = B'''H\003'''
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
UpperCAmelCase : List[Any] = 45
UpperCAmelCase : List[Any] = 15_81
UpperCAmelCase : int = 15_17
UpperCAmelCase : str = 15_70
UpperCAmelCase : Dict = 15_84
UpperCAmelCase : int = 17_93
UpperCAmelCase : Union[str, Any] = 17_95
UpperCAmelCase : Dict = 19_16
UpperCAmelCase : Union[str, Any] = 18_64
UpperCAmelCase : Dict = 19_05
UpperCAmelCase : List[str] = 19_19
UpperCAmelCase : Optional[int] = 24_29
UpperCAmelCase : Union[str, Any] = 22_08
UpperCAmelCase : str = 24_18
UpperCAmelCase : List[str] = 23_23
UpperCAmelCase : List[str] = 24_07
# @@protoc_insertion_point(module_scope)
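# --- Editor-added usage note (hedged; the path is a placeholder) ---
# Once the builders above have run, `ModelProto` exists in this module and can parse
# a binary SentencePiece model, e.g.:
#     m = ModelProto()
#     m.ParseFromString(open("spiece.model", "rb").read())
#     print(m.trainer_spec.vocab_size, len(m.pieces))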
| 280 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE ( SchedulerCommonTest ):
    scheduler_classes =(IPNDMScheduler,)
    forward_default_kwargs =(('num_inference_steps', 50),)
def UpperCamelCase_ ( self : str , **__lowercase : Dict ):
'''simple docstring'''
__a = {"""num_train_timesteps""": 1000}
config.update(**__lowercase )
return config
def UpperCamelCase_ ( self : Any , __lowercase : Tuple=0 , **__lowercase : Dict ):
'''simple docstring'''
__a = dict(self.forward_default_kwargs )
__a = kwargs.pop("""num_inference_steps""" , __lowercase )
__a = self.dummy_sample
__a = 0.1 * sample
__a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__a = self.get_scheduler_config(**__lowercase )
__a = scheduler_class(**__lowercase )
scheduler.set_timesteps(__lowercase )
# copy over dummy past residuals
__a = dummy_past_residuals[:]
if time_step is None:
__a = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowercase )
__a = scheduler_class.from_pretrained(__lowercase )
new_scheduler.set_timesteps(__lowercase )
# copy over dummy past residuals
__a = dummy_past_residuals[:]
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : str , __lowercase : int=0 , **__lowercase : Dict ):
'''simple docstring'''
__a = dict(self.forward_default_kwargs )
__a = kwargs.pop("""num_inference_steps""" , __lowercase )
__a = self.dummy_sample
__a = 0.1 * sample
__a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__a = self.get_scheduler_config()
__a = scheduler_class(**__lowercase )
scheduler.set_timesteps(__lowercase )
# copy over dummy past residuals (must be after setting timesteps)
__a = dummy_past_residuals[:]
if time_step is None:
__a = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowercase )
__a = scheduler_class.from_pretrained(__lowercase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__lowercase )
# copy over dummy past residual (must be after setting timesteps)
__a = dummy_past_residuals[:]
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self : List[str] , **__lowercase : Dict ):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(**__lowercase )
__a = scheduler_class(**__lowercase )
__a = 10
__a = self.dummy_model()
__a = self.dummy_sample_deter
scheduler.set_timesteps(__lowercase )
for i, t in enumerate(scheduler.timesteps ):
__a = model(__lowercase , __lowercase )
__a = scheduler.step(__lowercase , __lowercase , __lowercase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
__a = model(__lowercase , __lowercase )
__a = scheduler.step(__lowercase , __lowercase , __lowercase ).prev_sample
return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2540529) < 10
| 302 | 0 |
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """Capitalizes the first character of a sentence (non-letters are left unchanged)."""
    if not sentence:
        return ""
    # Map every lowercase letter to its uppercase counterpart.
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
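# A quick usage sketch of the helper above:
if __name__ == "__main__":
    print(capitalize("hello world"))  # Hello world
    print(capitalize(""))             # prints an empty line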
| 229 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
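# The file above defers heavy imports through transformers' `_LazyModule`. A minimal standalone
# sketch of the same idea using PEP 562 module-level `__getattr__` (an illustration, not the
# actual `_LazyModule` implementation):
import importlib

_structure = {"modeling_van": ["VanModel"]}
_name_to_module = {name: mod for mod, names in _structure.items() for name in names}

def __getattr__(name):
    # Invoked only for names the module does not already define; imports the submodule lazily.
    if name in _name_to_module:
        module = importlib.import_module("." + _name_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")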
| 229 | 1 |
'''simple docstring'''
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))
def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    R"\n        return_all_scores (`bool`, *optional*, defaults to `False`):\n            Whether to return all prediction scores or just the one of the predicted class.\n        function_to_apply (`str`, *optional*, defaults to `\"default\"`):\n            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n            - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n              has several labels, will apply the softmax function on the output.\n            - `\"sigmoid\"`: Applies the sigmoid function on the output.\n            - `\"softmax\"`: Applies the softmax function on the output.\n            - `\"none\"`: Does not apply any function on the output.\n",
)
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
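# A hedged usage sketch of the pipeline above (the checkpoint is a public sentiment model;
# any sequence-classification checkpoint works the same way):
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
    print(classifier("I love this movie"))              # legacy single-item path: [{'label': ..., 'score': ...}]
    print(classifier("I love this movie", top_k=None))  # one score per label, sorted descending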
| 158 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
"MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"MBartForCausalLM",
"MBartForConditionalGeneration",
"MBartForQuestionAnswering",
"MBartForSequenceClassification",
"MBartModel",
"MBartPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
"TFMBartForConditionalGeneration",
"TFMBartModel",
"TFMBartPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
"FlaxMBartForConditionalGeneration",
"FlaxMBartForQuestionAnswering",
"FlaxMBartForSequenceClassification",
"FlaxMBartModel",
"FlaxMBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 158 | 1 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)
class NERTransformer(BaseTransformer):
    mode = "token-classification"
    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)
    def training_step(self, batch, batch_num):
        "Compute loss and log."
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids

        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}
    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples, self.labels, args.max_seq_length, self.tokenizer, cls_token_at_end=bool(self.config.model_type in ["xlnet"]), cls_token=self.tokenizer.cls_token, cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0, sep_token=self.tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(self.config.model_type in ["xlnet"]), pad_token=self.tokenizer.pad_token_id, pad_token_segment_id=self.tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode, batch_size, shuffle=False) -> DataLoader:
        "Load datasets. Called after prepare data."
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size
        )
    def validation_step(self, batch, batch_nb):
        "Compute validation"
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs):
        "Evaluation called for both Val and Test"
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs):
        # when stable
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def test_epoch_end(self, outputs):
        # updating to test_epoch_end instead of deprecated test_end
        ret, predictions, targets = self._eval_end(outputs)

        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
if __name__ == "__main__":
_A = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
_A = NERTransformer.add_model_specific_args(parser, os.getcwd())
_A = parser.parse_args()
_A = NERTransformer(args)
_A = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
_A = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
_A = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
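# A hedged invocation sketch (paths are illustrative; --data_dir, --model_name_or_path,
# --output_dir, --do_train and --do_predict are assumed to come from add_generic_args/BaseTransformer):
#
#   python run_ner.py --task_type NER --data_dir ./data --labels ./data/labels.txt \
#       --model_name_or_path bert-base-multilingual-cased --max_seq_length 128 \
#       --output_dir ./ner_model --do_train --do_predict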
| 352 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
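# A hedged usage sketch (the checkpoint name is a real public one but illustrative; with the
# default apply_ocr=True image processor, words and boxes come from OCR):
if __name__ == "__main__":
    from PIL import Image

    processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
    image = Image.open("document.png").convert("RGB")
    encoding = processor(image, return_tensors="pt")
    print(list(encoding.keys()))  # expected to include input_ids, bbox, attention_mask, image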
| 137 | 0 |
def solution() -> str:
    """Returns the last ten digits of 1**1 + 2**2 + ... + 1000**1000 (Project Euler problem 48)."""
    total = 0
    for i in range(1, 1_001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
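# Since only the last ten digits matter, a hedged alternative avoids the huge intermediate
# integers by reducing modulo 10**10 with three-argument pow:
def solution_mod() -> str:
    mod = 10**10
    return str(sum(pow(i, i, mod) for i in range(1, 1_001)) % mod).zfill(10)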
| 0 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    sample: jnp.ndarray
@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)
        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype, )

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
    def __call__(self, sample, timesteps, encoder_hidden_states, down_block_additional_residuals=None, mid_block_additional_residual=None, return_dict: bool = True, train: bool = False) -> Union[FlaxUNet2DConditionOutput, Tuple]:
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample, temb=t_emb, encoder_hidden_states=encoder_hidden_states, res_hidden_states_tuple=res_samples, deterministic=not train, )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample)
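# A hedged initialization sketch for the module above (shapes follow the class defaults):
if __name__ == "__main__":
    unet = FlaxUNet2DConditionModel(sample_size=32)
    params = unet.init_weights(jax.random.PRNGKey(0))  # FrozenDict of randomly initialized parameters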
| 117 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'tanreinama/GPTSAN-2.8B-spout_is_uniform': (
'https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'
),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self, vocab_size=36000, max_position_embeddings=1280, d_model=1024, d_ff=8192, d_ext=4096, d_spout=128, num_switch_layers=10, num_ext_layers=0, num_heads=16, num_experts=16, expert_capacity=128, dropout_rate=0.0, layer_norm_epsilon=1e-5, router_bias=False, router_jitter_noise=0.0, router_dtype="float32", router_ignore_padding_tokens=False, output_hidden_states=False, output_attentions=False, initializer_factor=0.002, output_router_logits=False, use_cache=True, separator_token_id=35998, pad_token_id=35995, eos_token_id=35999, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, **kwargs, )
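# A quick construction sketch; the overridden values below are illustrative:
if __name__ == "__main__":
    config = GPTSanJapaneseConfig()  # all defaults from __init__ above
    small = GPTSanJapaneseConfig(d_model=512, num_switch_layers=4)
    print(small.hidden_size)  # 512, resolved through attribute_map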
| 136 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes  # Number of nodes in the graph
        self.m_edges: list[list[int]] = []  # Edges stored as [u, v, weight]
        self.m_component: dict[int, int] = {}  # Maps each node to its component
    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Adds an edge in the format [first, second, edge weight]."""
        self.m_edges.append([u_node, v_node, weight])
    def find_component(self, u_node: int) -> int:
        """Returns the component root of a node by following parent links."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])
    def set_component(self, u_node: int) -> None:
        """Recomputes the component root for every node."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)
    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Merges the components of two nodes, attaching the smaller one to the larger one."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)
    def boruvka(self) -> None:
        """Performs Borůvka's algorithm to find the minimum spanning tree."""
        # Initialize additional lists required for the algorithm.
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    # Track the cheapest edge leaving each of the two components.
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes

        print(f"The total weight of the minimal spanning tree is: {mst_weight}")
def test_vector() -> None:
    """Doctest placeholder; the original file exercises the Graph class here."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
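# A small usage sketch of the Graph above: a weighted triangle whose MST keeps
# edges (1, 2) and (0, 2) for a total weight of 4.
def demo() -> None:
    g = Graph(3)
    g.add_edge(0, 1, 5)
    g.add_edge(1, 2, 1)
    g.add_edge(0, 2, 3)
    g.boruvka()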
| 136 | 1 |
"""simple docstring"""
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False
    def insert_many(self, words: list[str]) -> None:
        """Inserts each word from a list of words into the Trie."""
        for word in words:
            self.insert(word)
    def insert(self, word: str) -> None:
        """Inserts a single word into the Trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True
    def find(self, word: str) -> bool:
        """Returns True if the exact word was previously inserted."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf
    def delete(self, word: str) -> None:
        """Deletes a word from the Trie, pruning nodes that become unused."""

        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")

    for key, value in node.nodes.items():
        print_words(value, word + key)
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True
def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
| 54 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here
    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])
    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
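# A hedged usage sketch of the constraint exercised above (token ids are illustrative;
# constrained generation consumes it via `model.generate(..., constraints=[...])`):
if __name__ == "__main__":
    constraint = DisjunctiveConstraint([[1, 2, 4], [1, 2, 3, 4]])
    # outputs = model.generate(input_ids, constraints=[constraint], num_beams=4)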
| 54 | 1 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CPMAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
"<d>",
"</d>",
"<s>",
"</s>",
"</_>",
"<unk>",
"<pad>",
"</n>",
"我",
"是",
"C",
"P",
"M",
"A",
"n",
"t",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens

        input_ids = [6, 9_802, 14_962, 2_082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)

        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
| 305 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast
@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 305 | 1 |