code stringlengths 81 54k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class _a :
'''simple docstring'''
def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=True , A__=True , A__=True , A__=99 , A__=32 , A__=5 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=128 , A__=32 , A__=16 , A__=2 , A__=0.0_2 , A__=3 , A__=4 , A__=None , ):
A__ : List[Any] = parent
A__ : int = batch_size
A__ : Union[str, Any] = seq_length
A__ : List[str] = is_training
A__ : Optional[Any] = use_input_mask
A__ : str = use_token_type_ids
A__ : Union[str, Any] = use_labels
A__ : Optional[int] = vocab_size
A__ : Optional[int] = hidden_size
A__ : Any = num_hidden_layers
A__ : Optional[Any] = num_attention_heads
A__ : List[str] = intermediate_size
A__ : Any = hidden_act
A__ : Dict = hidden_dropout_prob
A__ : str = attention_probs_dropout_prob
A__ : Any = max_position_embeddings
A__ : List[str] = type_vocab_size
A__ : Dict = type_sequence_label_size
A__ : Optional[int] = initializer_range
A__ : str = num_labels
A__ : Any = num_choices
A__ : Union[str, Any] = scope
def __A ( self ):
A__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Optional[int] = None
if self.use_input_mask:
A__ : int = random_attention_mask([self.batch_size, self.seq_length] )
A__ : str = None
if self.use_token_type_ids:
A__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ : Tuple = None
A__ : int = None
A__ : str = None
if self.use_labels:
A__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
A__ : Dict = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self ):
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A__ , initializer_range=self.initializer_range , )
def __A ( self ):
(
A__
) : List[Any] = self.prepare_config_and_inputs()
A__ : Union[str, Any] = True
A__ : Tuple = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A__ : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __A ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ):
A__ : List[Any] = NezhaModel(config=A__ )
model.to(A__ )
model.eval()
A__ : Union[str, Any] = model(A__ , attention_mask=A__ , token_type_ids=A__ )
A__ : str = model(A__ , token_type_ids=A__ )
A__ : Optional[int] = model(A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __A ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ):
A__ : List[str] = True
A__ : List[Any] = NezhaModel(A__ )
model.to(A__ )
model.eval()
A__ : Tuple = model(
A__ , attention_mask=A__ , token_type_ids=A__ , encoder_hidden_states=A__ , encoder_attention_mask=A__ , )
A__ : Optional[Any] = model(
A__ , attention_mask=A__ , token_type_ids=A__ , encoder_hidden_states=A__ , )
A__ : Dict = model(A__ , attention_mask=A__ , token_type_ids=A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __A ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ):
A__ : Any = NezhaForMaskedLM(config=A__ )
model.to(A__ )
model.eval()
A__ : int = model(A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ):
A__ : Optional[Any] = NezhaForNextSentencePrediction(config=A__ )
model.to(A__ )
model.eval()
A__ : int = model(
A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __A ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ):
A__ : str = NezhaForPreTraining(config=A__ )
model.to(A__ )
model.eval()
A__ : Union[str, Any] = model(
A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ , next_sentence_label=A__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __A ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ):
A__ : str = NezhaForQuestionAnswering(config=A__ )
model.to(A__ )
model.eval()
A__ : str = model(
A__ , attention_mask=A__ , token_type_ids=A__ , start_positions=A__ , end_positions=A__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ):
A__ : Optional[int] = self.num_labels
A__ : Optional[int] = NezhaForSequenceClassification(A__ )
model.to(A__ )
model.eval()
A__ : List[Any] = model(A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ):
A__ : Union[str, Any] = self.num_labels
A__ : List[Any] = NezhaForTokenClassification(config=A__ )
model.to(A__ )
model.eval()
A__ : List[str] = model(A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ):
A__ : int = self.num_choices
A__ : Optional[int] = NezhaForMultipleChoice(config=A__ )
model.to(A__ )
model.eval()
A__ : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : int = model(
A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self ):
A__ : Any = self.prepare_config_and_inputs()
(
A__
) : List[str] = config_and_inputs
A__ : Optional[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _a (__magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Any = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase__: List[Any] = (
{
'''feature-extraction''': NezhaModel,
'''fill-mask''': NezhaForMaskedLM,
'''question-answering''': NezhaForQuestionAnswering,
'''text-classification''': NezhaForSequenceClassification,
'''token-classification''': NezhaForTokenClassification,
'''zero-shot''': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__: List[Any] = True
def __A ( self , A__ , A__ , A__=False ):
A__ : Tuple = super()._prepare_for_class(A__ , A__ , return_labels=A__ )
if return_labels:
if model_class in get_values(A__ ):
A__ : List[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=A__ )
A__ : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A__ )
return inputs_dict
def __A ( self ):
A__ : Optional[int] = NezhaModelTester(self )
A__ : List[Any] = ConfigTester(self , config_class=A__ , hidden_size=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
A__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
def __A ( self ):
A__ : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*A__ )
def __A ( self ):
# This regression test was failing with PyTorch < 1.3
(
A__
) : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
A__ : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(
A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , )
def __A ( self ):
A__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A__ )
def __A ( self ):
A__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A__ )
def __A ( self ):
A__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*A__ )
def __A ( self ):
A__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*A__ )
def __A ( self ):
A__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A__ )
def __A ( self ):
A__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A__ )
def __A ( self ):
A__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A__ )
@slow
def __A ( self ):
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Union[str, Any] = NezhaModel.from_pretrained(A__ )
self.assertIsNotNone(A__ )
@slow
@require_torch_gpu
def __A ( self ):
A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
A__ : Optional[Any] = True
A__ : Dict = model_class(config=A__ )
A__ : Optional[Any] = self._prepare_for_class(A__ , A__ )
A__ : List[Any] = torch.jit.trace(
A__ , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(A__ , os.path.join(A__ , """bert.pt""" ) )
A__ : Optional[Any] = torch.jit.load(os.path.join(A__ , """bert.pt""" ) , map_location=A__ )
loaded(inputs_dict["""input_ids"""].to(A__ ) , inputs_dict["""attention_mask"""].to(A__ ) )
@require_torch
class _a (unittest.TestCase ):
'''simple docstring'''
@slow
def __A ( self ):
A__ : Optional[int] = NezhaModel.from_pretrained("""sijunhe/nezha-cn-base""" )
A__ : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
A__ : Tuple = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
A__ : Any = model(A__ , attention_mask=A__ )[0]
A__ : str = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , A__ )
A__ : Optional[Any] = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A__ , atol=1e-4 ) )
@slow
def __A ( self ):
A__ : List[Any] = NezhaForMaskedLM.from_pretrained("""sijunhe/nezha-cn-base""" )
A__ : Union[str, Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
A__ : Tuple = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
A__ : List[Any] = model(A__ , attention_mask=A__ )[0]
A__ : Union[str, Any] = torch.Size((1, 6, 2_1128) )
self.assertEqual(output.shape , A__ )
A__ : Optional[int] = torch.tensor(
[[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A__ , atol=1e-4 ) ) | 711 |
class _a :
'''simple docstring'''
def __init__( self ):
A__ : str = """"""
A__ : Any = """"""
A__ : List[Any] = []
def __A ( self , A__ , A__ ):
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
A__ : Optional[Any] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
A__ : Union[str, Any] = self.__min_dist_top_down_dp(A__ , n - 1 )
A__ : Union[str, Any] = self.__min_dist_top_down_dp(m - 1 , A__ )
A__ : Union[str, Any] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
A__ : List[Any] = 1 + min(A__ , A__ , A__ )
return self.dp[m][n]
def __A ( self , A__ , A__ ):
A__ : Tuple = worda
A__ : Dict = worda
A__ : Optional[Any] = [[-1 for _ in range(len(A__ ) )] for _ in range(len(A__ ) )]
return self.__min_dist_top_down_dp(len(A__ ) - 1 , len(A__ ) - 1 )
def __A ( self , A__ , A__ ):
A__ : Optional[Any] = worda
A__ : Dict = worda
A__ : Union[str, Any] = len(A__ )
A__ : List[str] = len(A__ )
A__ : int = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
A__ : Tuple = j
elif j == 0: # second string is empty
A__ : Dict = i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
A__ : str = self.dp[i - 1][j - 1]
else:
A__ : Union[str, Any] = self.dp[i][j - 1]
A__ : str = self.dp[i - 1][j]
A__ : Union[str, Any] = self.dp[i - 1][j - 1]
A__ : Tuple = 1 + min(A__ , A__ , A__ )
return self.dp[m][n]
if __name__ == "__main__":
A_ : Union[str, Any] = EditDistance()
print('****************** Testing Edit Distance DP Algorithm ******************')
print()
A_ : int = input('Enter the first string: ').strip()
A_ : List[str] = input('Enter the second string: ').strip()
print()
print(f'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}''')
print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}''')
print()
print('*************** End of Testing Edit Distance DP Algorithm ***************')
| 64 | 0 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def UpperCamelCase (lowercase_: int ) -> Optional[int]:
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class _a (nn.Module ):
'''simple docstring'''
def __init__( self , A__ , A__ ):
super().__init__()
A__ : Optional[int] = module
A__ : List[Any] = nn.Sequential(
nn.Linear(module.in_features , A__ , bias=A__ ) , nn.Linear(A__ , module.out_features , bias=A__ ) , )
A__ : List[Any] = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=A__ )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def __A ( self , A__ , *A__ , **A__ ):
return self.module(A__ , *A__ , **A__ ) + self.adapter(A__ )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _a (unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: int = '''bigscience/bloom-1b7'''
# Constant values
UpperCAmelCase__: List[Any] = 2.1_09_65_95_52_69_25_74
UpperCAmelCase__: str = '''Hello my name is'''
UpperCAmelCase__: List[Any] = set()
EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. I''' )
EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' )
EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' )
UpperCAmelCase__: List[str] = 10
def __A ( self ):
# Models and tokenizer
A__ : Any = AutoTokenizer.from_pretrained(self.model_name )
class _a (__magic_name__ ):
'''simple docstring'''
def __A ( self ):
super().setUp()
# Models and tokenizer
A__ : Any = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="""auto""" )
A__ : Dict = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A__ , device_map="""auto""" )
def __A ( self ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
A__ : int = self.model_abit.config
self.assertTrue(hasattr(A__ , """quantization_config""" ) )
A__ : Optional[int] = config.to_dict()
A__ : str = config.to_diff_dict()
A__ : List[str] = config.to_json_string()
def __A ( self ):
from bitsandbytes.nn import Paramsabit
A__ : Any = self.model_fpaa.get_memory_footprint()
A__ : Union[str, Any] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
A__ : Optional[int] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def __A ( self ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(A__ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def __A ( self ):
A__ : str = self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Tuple = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A__ ) , self.EXPECTED_OUTPUTS )
def __A ( self ):
A__ : Tuple = BitsAndBytesConfig()
A__ : Dict = True
A__ : List[str] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A__ , device_map="""auto""" )
A__ : str = self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Dict = model_abit_from_config.generate(
input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A__ ) , self.EXPECTED_OUTPUTS )
def __A ( self ):
with self.assertRaises(A__ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(A__ )
def __A ( self ):
A__ : Dict = BitsAndBytesConfig()
with self.assertRaises(A__ ):
A__ : str = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A__ , load_in_abit=A__ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
def __A ( self ):
with self.assertRaises(A__ ):
# Tries with `str`
self.model_abit.to("""cpu""" )
with self.assertRaises(A__ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(A__ ):
# Tries with a `device`
self.model_abit.to(torch.device("""cuda:0""" ) )
with self.assertRaises(A__ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(A__ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
A__ : Optional[int] = self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Tuple = self.model_fpaa.to(torch.floataa )
A__ : List[str] = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
A__ : int = self.model_fpaa.to("""cpu""" )
# Check this does not throw an error
A__ : Tuple = self.model_fpaa.half()
# Check this does not throw an error
A__ : List[str] = self.model_fpaa.float()
def __A ( self ):
A__ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=A__ , device_map="""auto""" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _a (unittest.TestCase ):
'''simple docstring'''
@classmethod
def __A ( cls ):
A__ : int = """t5-small"""
A__ : Optional[Any] = """google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense
A__ : List[Any] = AutoTokenizer.from_pretrained(cls.model_name )
A__ : str = """Translate in German: Hello, my dog is cute"""
def __A ( self ):
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
from transformers import TaForConditionalGeneration
A__ : Tuple = TaForConditionalGeneration._keep_in_fpaa_modules
A__ : Optional[Any] = None
# test with `t5-small`
A__ : Any = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A__ , device_map="""auto""" )
A__ : Optional[int] = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Any = model.generate(**A__ )
# test with `flan-t5-small`
A__ : Optional[Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A__ , device_map="""auto""" )
A__ : List[str] = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Union[str, Any] = model.generate(**A__ )
A__ : Dict = modules
def __A ( self ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
A__ : Dict = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A__ , device_map="""auto""" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
A__ : Tuple = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : List[Any] = model.generate(**A__ )
# test with `flan-t5-small`
A__ : str = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A__ , device_map="""auto""" )
A__ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Tuple = model.generate(**A__ )
class _a (__magic_name__ ):
'''simple docstring'''
def __A ( self ):
super().setUp()
# model_name
A__ : Any = """bigscience/bloom-560m"""
A__ : int = """t5-small"""
# Different types of model
A__ : Union[str, Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=A__ , device_map="""auto""" )
# Sequence classification model
A__ : Dict = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=A__ , device_map="""auto""" )
# CausalLM model
A__ : str = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A__ , device_map="""auto""" )
# Seq2seq model
A__ : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=A__ , device_map="""auto""" )
def __A ( self ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class _a (__magic_name__ ):
'''simple docstring'''
def __A ( self ):
super().setUp()
def __A ( self ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
A__ : str = pipeline(
"""text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
A__ : str = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class _a (__magic_name__ ):
'''simple docstring'''
def __A ( self ):
super().setUp()
def __A ( self ):
A__ : Optional[int] = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=A__ , device_map="""balanced""" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
A__ : Dict = self.tokenizer(self.input_text , return_tensors="""pt""" )
# Second real batch
A__ : Optional[int] = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=A__ ) , self.EXPECTED_OUTPUTS )
class _a (__magic_name__ ):
'''simple docstring'''
def __A ( self ):
A__ : str = """facebook/opt-350m"""
super().setUp()
def __A ( self ):
if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ):
return
# Step 1: freeze all parameters
A__ : str = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A__ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
A__ : Optional[Any] = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
A__ : List[str] = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(A__ ) ):
A__ : Any = LoRALayer(module.q_proj , rank=16 )
A__ : str = LoRALayer(module.k_proj , rank=16 )
A__ : List[str] = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
A__ : List[Any] = self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
A__ : List[str] = model.forward(**A__ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(A__ , A__ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(A__ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: Optional[int] = '''gpt2-xl'''
UpperCAmelCase__: str = 3.31_91_85_48_54_15_21_87
| 712 |
def UpperCamelCase (lowercase_: int , lowercase_: int ) -> int:
while second != 0:
A__ : int = first & second
first ^= second
A__ : int = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ : Optional[Any] = int(input('Enter the first number: ').strip())
A_ : List[str] = int(input('Enter the second number: ').strip())
print(f'''{add(first, second) = }''')
| 64 | 0 |
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
A_ : Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
A_ : List[str] = 25_6047
A_ : Union[str, Any] = 25_6145
@require_sentencepiece
@require_tokenizers
class _a (__magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Any = NllbTokenizer
UpperCAmelCase__: int = NllbTokenizerFast
UpperCAmelCase__: Union[str, Any] = True
UpperCAmelCase__: Union[str, Any] = True
UpperCAmelCase__: List[str] = {}
def __A ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
A__ : Tuple = NllbTokenizer(A__ , keep_accents=A__ )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self ):
A__ : Optional[int] = NllbTokenizer(A__ , keep_accents=A__ )
A__ : Dict = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(A__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
A__ : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
A__ : Dict = tokenizer.convert_tokens_to_ids(A__ )
self.assertListEqual(
A__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
A__ : str = tokenizer.convert_ids_to_tokens(A__ )
self.assertListEqual(
A__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def __A ( self ):
A__ : int = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-nllb""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
A__ : Tuple = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
A__ : List[Any] = self.tokenizer_class.from_pretrained(A__ , **A__ )
A__ : Optional[int] = tempfile.mkdtemp()
A__ : str = tokenizer_r.save_pretrained(A__ )
A__ : str = tokenizer_p.save_pretrained(A__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
A__ : List[str] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(A__ , A__ )
# Checks everything loads correctly in the same way
A__ : str = tokenizer_r.from_pretrained(A__ )
A__ : Union[str, Any] = tokenizer_p.from_pretrained(A__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A__ , A__ ) )
shutil.rmtree(A__ )
# Save tokenizer rust, legacy_format=True
A__ : str = tempfile.mkdtemp()
A__ : Optional[Any] = tokenizer_r.save_pretrained(A__ , legacy_format=A__ )
A__ : Union[str, Any] = tokenizer_p.save_pretrained(A__ )
# Checks it save with the same files
self.assertSequenceEqual(A__ , A__ )
# Checks everything loads correctly in the same way
A__ : Optional[int] = tokenizer_r.from_pretrained(A__ )
A__ : Union[str, Any] = tokenizer_p.from_pretrained(A__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A__ , A__ ) )
shutil.rmtree(A__ )
# Save tokenizer rust, legacy_format=False
A__ : Dict = tempfile.mkdtemp()
A__ : int = tokenizer_r.save_pretrained(A__ , legacy_format=A__ )
A__ : Optional[int] = tokenizer_p.save_pretrained(A__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
A__ : int = tokenizer_r.from_pretrained(A__ )
A__ : str = tokenizer_p.from_pretrained(A__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A__ , A__ ) )
shutil.rmtree(A__ )
    @require_torch
    def __A ( self ):
        # Seq2seq batch-preparation test: source must truncate to max_length=3
        # and targets to max_target_length (or fall back to max_length).
        # NOTE(review): several identifiers look machine-corrupted —
        # `test_seqaseq` / `prepare_seqaseq_batch` (presumably *seq2seq*),
        # `tokenizers` (the list is bound to `A__` above it), and the bare
        # `A__` call arguments. Confirm against the upstream tokenizer mixin.
        if not self.test_seqaseq:
            return
        A__ : List[Any] = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                # Longer text that will definitely require truncation.
                A__ : Tuple = [
                    """ UN Chief Says There Is No Military Solution in Syria""",
                    """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"""
                    """ Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"""
                    """ will only worsen the violence and misery for millions of people.""",
                ]
                A__ : Tuple = [
                    """Şeful ONU declară că nu există o soluţie militară în Siria""",
                    """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"""
                    """ Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"""
                    """ că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
                ]
                try:
                    # Tokenizers that do not implement batch preparation are exempt.
                    A__ : Dict = tokenizer.prepare_seqaseq_batch(
                        src_texts=A__ , tgt_texts=A__ , max_length=3 , max_target_length=10 , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" , )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1] , 3 )
                self.assertEqual(batch.labels.shape[1] , 10 )
                # max_target_length will default to max_length if not specified
                A__ : Optional[int] = tokenizer.prepare_seqaseq_batch(
                    A__ , tgt_texts=A__ , max_length=3 , return_tensors="""pt""" )
                self.assertEqual(batch.input_ids.shape[1] , 3 )
                self.assertEqual(batch.labels.shape[1] , 3 )
                # Encoder-only preparation: no targets, so no decoder_input_ids.
                A__ : List[Any] = tokenizer.prepare_seqaseq_batch(
                    src_texts=A__ , max_length=3 , max_target_length=10 , return_tensors="""pt""" )
                self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
                self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
                self.assertNotIn("""decoder_input_ids""" , A__ )
    @unittest.skip("""Unfortunately way too slow to build a BPE with SentencePiece.""" )
    def __A ( self ):
        # Intentionally skipped: building a BPE vocabulary with SentencePiece
        # is prohibitively slow for CI.
        pass
    def __A ( self ):
        # Verifies that `additional_special_tokens` supplied at load time are
        # encoded to a single id, consistently across the fast (Rust),
        # re-loaded fast, and slow (Python) tokenizers.
        # NOTE(review): the bare `A__` values passed as `lstrip=` and
        # `add_special_tokens=` are unresolved names in this file — upstream
        # these are booleans; confirm before relying on this test.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                A__ : int = [AddedToken("""<special>""" , lstrip=A__ )]
                A__ : List[str] = self.rust_tokenizer_class.from_pretrained(
                    A__ , additional_special_tokens=A__ , **A__ )
                A__ : Tuple = tokenizer_r.encode("""Hey this is a <special> token""" )
                A__ : int = tokenizer_r.encode("""<special>""" , add_special_tokens=A__ )[0]
                self.assertTrue(special_token_id in r_output )
                if self.test_slow_tokenizer:
                    # Compare the fast tokenizer against a re-loaded fast copy
                    # and the slow implementation.
                    A__ : Dict = self.rust_tokenizer_class.from_pretrained(
                        A__ , additional_special_tokens=A__ , **A__ , )
                    A__ : Union[str, Any] = self.tokenizer_class.from_pretrained(
                        A__ , additional_special_tokens=A__ , **A__ )
                    A__ : Tuple = tokenizer_p.encode("""Hey this is a <special> token""" )
                    A__ : Any = tokenizer_cr.encode("""Hey this is a <special> token""" )
                    self.assertEqual(A__ , A__ )
                    self.assertEqual(A__ , A__ )
                    self.assertTrue(special_token_id in p_output )
                    self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class _a (unittest.TestCase ):
    """Integration tests for the NLLB tokenizer against the released
    facebook/nllb-200-distilled-600M checkpoint.

    NOTE(review): identifiers in this file look machine-corrupted — every
    class attribute is bound as `UpperCAmelCase__` (so later bindings shadow
    earlier ones) while method bodies read `checkpoint_name`, `src_text`,
    `tgt_text`, `expected_src_tokens`; `self.tokenizer`, `NllbTokenizer`,
    `EN_CODE`, `RO_CODE`, `shift_tokens_right` and `nested_simplify` are never
    bound under those names here. Confirm against the upstream transformers
    NLLB tokenization tests before running.
    """

    # Checkpoint, English sources, Romanian references, and the expected
    # token ids of src_text[0] (underscored ints are digit-group separators).
    UpperCAmelCase__: Optional[Any] = '''facebook/nllb-200-distilled-600M'''
    UpperCAmelCase__: List[Any] = [
        ''' UN Chief Says There Is No Military Solution in Syria''',
        ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
    ]
    UpperCAmelCase__: Optional[Any] = [
        '''Şeful ONU declară că nu există o soluţie militară în Siria''',
        '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
        ''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
        ''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
    ]
    UpperCAmelCase__: Optional[int] = [
        25_60_47,
        1_62_97,
        13_44_08,
        81_65,
        24_80_66,
        1_47_34,
        9_50,
        11_35,
        10_57_21,
        35_73,
        83,
        2_73_52,
        1_08,
        4_94_86,
        2,
    ]
    @classmethod
    def __A ( cls ):
        # setUpClass: load the pretrained tokenizer once for the whole class.
        A__ : NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" )
        A__ : Any = 1
        return cls
    def __A ( self ):
        # Language codes occupy fixed ids directly after the base vocabulary.
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Arab"""] , 25_6001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Latn"""] , 25_6002 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""fra_Latn"""] , 25_6057 )
    def __A ( self ):
        # Encoding the first English source must reproduce the expected ids.
        A__ : List[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , A__ )
    def __A ( self ):
        # Decoding with skip_special_tokens must drop language code and EOS.
        self.assertIn(A__ , self.tokenizer.all_special_ids )
        # fmt: off
        A__ : Union[str, Any] = [RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047]
        # fmt: on
        A__ : List[Any] = self.tokenizer.decode(A__ , skip_special_tokens=A__ )
        A__ : Optional[int] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A__ )
        self.assertEqual(A__ , A__ )
        self.assertNotIn(self.tokenizer.eos_token , A__ )
    def __A ( self ):
        # Truncation: ids respect max_length and end with EOS (id 2).
        A__ : Optional[int] = ["""this is gunna be a long sentence """ * 20]
        assert isinstance(src_text[0] , A__ )
        A__ : int = 10
        A__ : List[Any] = self.tokenizer(A__ , max_length=A__ , truncation=A__ ).input_ids[0]
        self.assertEqual(ids[-1] , 2 )
        self.assertEqual(ids[0] , A__ )
        self.assertEqual(len(A__ ) , A__ )
    def __A ( self ):
        # Mask token id and unknown language code map as expected.
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [25_6203, 3] )
    def __A ( self ):
        # Save/reload round trip preserves the fairseq id mapping.
        A__ : int = tempfile.mkdtemp()
        A__ : str = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(A__ )
        A__ : List[str] = NllbTokenizer.from_pretrained(A__ )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A__ )
    @require_torch
    def __A ( self ):
        # Target-mode batching plus shift_tokens_right: decoder input starts
        # with EOS and prefix/suffix special tokens are reset afterwards.
        A__ : List[str] = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=A__ , truncation=A__ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
        A__ : Optional[Any] = shift_tokens_right(
            batch["""labels"""] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["""ron_Latn"""] )
        self.assertIsInstance(A__ , A__ )
        self.assertEqual((2, 15) , batch.input_ids.shape )
        self.assertEqual((2, 15) , batch.attention_mask.shape )
        A__ : int = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , A__ )
        self.assertEqual(A__ , batch.decoder_input_ids[0, 0] ) # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
    def __A ( self ):
        # Source and target may use independent max_length values.
        A__ : List[str] = self.tokenizer(self.src_text , padding=A__ , truncation=A__ , max_length=3 , return_tensors="""pt""" )
        A__ : str = self.tokenizer(
            text_target=self.tgt_text , padding=A__ , truncation=A__ , max_length=10 , return_tensors="""pt""" )
        A__ : Optional[int] = targets["""input_ids"""]
        A__ : Dict = shift_tokens_right(
            A__ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
    @require_torch
    def __A ( self ):
        # _build_translation_inputs supplies forced_bos_token_id for the
        # requested target language.
        A__ : List[Any] = self.tokenizer._build_translation_inputs(
            """A test""" , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
        self.assertEqual(
            nested_simplify(A__ ) , {
                # A, test, EOS, en_XX
                """input_ids""": [[25_6047, 70, 7356, 2]],
                """attention_mask""": [[1, 1, 1, 1]],
                # ar_AR
                """forced_bos_token_id""": 25_6057,
            } , )
    @require_torch
    def __A ( self ):
        # Legacy behaviour places the language code as suffix; new behaviour
        # places it as prefix.
        A__ : str = True
        A__ : int = self.tokenizer(
            """UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
        self.assertEqual(
            inputs.input_ids , [1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2, 25_6047] )
        A__ : Any = False
        A__ : Any = self.tokenizer(
            """UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
        self.assertEqual(
            inputs.input_ids , [25_6047, 1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2] )
| 713 |
from __future__ import annotations
from collections.abc import Callable
A_ : List[Any] = list[list[float | int]]
def UpperCamelCase(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the linear system ``matrix @ x = vector`` by Gaussian elimination
    with partial pivoting.

    The corrupted original bound both parameters to the same name (a
    SyntaxError) and every local to ``A__`` while the logic read ``size``,
    ``augmented``, ``pivot_row`` and ``ratio`` (NameErrors); consistent names
    are restored here.

    Args:
        matrix: square (size x size) coefficient matrix.
        vector: (size x 1) column matrix of right-hand-side values.

    Returns:
        A (size x 1) column matrix, each entry rounded to 10 decimal places
        to strip float noise such as 2.000000000000004.
    """
    size = len(matrix)
    # Build the augmented matrix [matrix | vector].
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # Partial pivoting: choose the row (at or below `row`) with the
        # largest absolute value in this column for numerical stability.
        pivot_row = max((abs(augmented[rowa][col]), rowa) for rowa in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            # Column is all zero below the diagonal; move to the next column.
            col += 1
            continue
        augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        # Eliminate entries below the pivot.
        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio

        row += 1
        col += 1

    # Back substitution: clear the entries above each pivot.
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio

    # Round to get rid of numbers like 2.000000000000004.
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def UpperCamelCase(lowercase_: list[int]) -> Callable[[int], int]:
    """Return the minimal-degree polynomial through the points
    ``(1, lowercase_[0]), (2, lowercase_[1]), ...`` as a callable.

    The corrupted original called ``range()`` on the list itself, read
    ``size`` / ``matrix`` / ``vector`` / ``coeffs`` that were never bound, and
    called an undefined ``solve``; this version restores the names and embeds
    a private solver so the function stands alone.

    Args:
        lowercase_: the y-values to interpolate, taken at x = 1, 2, 3, ...

    Returns:
        A function evaluating the fitted polynomial (with coefficients
        rounded to the nearest integer) at an int x-value.
    """

    def _solve(coeff_matrix: Matrix, const_vector: Matrix) -> Matrix:
        # Gaussian elimination with partial pivoting (private copy so this
        # function does not depend on the module-level solver binding).
        n = len(coeff_matrix)
        aug: Matrix = [[0 for _ in range(n + 1)] for _ in range(n)]
        for r in range(n):
            for c in range(n):
                aug[r][c] = coeff_matrix[r][c]
            aug[r][n] = const_vector[r][0]
        r = c = 0
        while r < n and c < n:
            pivot = max((abs(aug[k][c]), k) for k in range(r, n))[1]
            if aug[pivot][c] == 0:
                c += 1
                continue
            aug[r], aug[pivot] = aug[pivot], aug[r]
            for k in range(r + 1, n):
                ratio = aug[k][c] / aug[r][c]
                aug[k][c] = 0
                for j in range(c + 1, n + 1):
                    aug[k][j] -= aug[r][j] * ratio
            r += 1
            c += 1
        for c in range(1, n):
            for r in range(c):
                ratio = aug[r][c] / aug[c][c]
                for j in range(c, n + 1):
                    aug[r][j] -= aug[c][j] * ratio
        return [[round(aug[r][n] / aug[r][r], 10)] for r in range(n)]

    size = len(lowercase_)
    # Vandermonde system: row for x has entries (x+1)^(size-1), ..., (x+1)^0.
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    for x_val, y_val in enumerate(lowercase_):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs: Matrix = _solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        # Coefficients are rounded to the nearest integer before evaluating;
        # the target polynomials (Project Euler 101) have integer coefficients.
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def UpperCamelCase(variable: int) -> int:
    """Evaluate the Project Euler 101 generating polynomial
    ``u(n) = 1 - n + n^2 - n^3 + ... + n^10`` at ``variable``.

    The corrupted original named the parameter ``lowercase_`` while the body
    read ``variable`` (NameError); the parameter name is restored.
    """
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def UpperCamelCase(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect term (FIT) of each optimum polynomial (OP)
    fitted to prefixes of ``func``'s sequence (Project Euler problem 101).

    The corrupted original bound both parameters to the same name (a
    SyntaxError) and never bound ``data_points`` / ``polynomials`` / ``ret``;
    consistent names are restored.

    NOTE(review): ``question_function`` and ``interpolate`` must resolve to
    this module's generating function and interpolator — the surrounding
    file's bindings look garbled (both are defined as ``UpperCamelCase``);
    confirm before running.

    Args:
        func: integer sequence generator evaluated at n = 1..order.
        order: number of sequence terms (and polynomial fits) to consider.

    Returns:
        The sum of the first incorrect terms over all prefix fits.
    """
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret = 0
    for poly in polynomials:
        # Walk forward until the fitted polynomial first disagrees with func,
        # then accumulate that first incorrect term.
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
    # NOTE(review): `solution` is never bound under that name in this file —
    # the solver above is defined as `UpperCamelCase`, so running this as a
    # script raises NameError. Confirm the intended entry-point name.
    print(f'''{solution() = }''')
| 64 | 0 |
import string
from math import logaa
def UpperCamelCase(term: str, document: str) -> int:
    """Return how many times ``term`` occurs in ``document``.

    Matching is case-insensitive; punctuation and newlines are stripped
    before whitespace tokenization. The corrupted original bound both
    parameters to the same name (a SyntaxError) while the body read
    ``document`` and ``term``; the names are restored.
    """
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])
def UpperCamelCase(term: str, corpus: str) -> tuple[int, int]:
    """Return ``(docs containing term, total docs)`` for a newline-separated
    ``corpus``.

    Matching is case-insensitive and ignores punctuation. The corrupted
    original bound both parameters to the same name (a SyntaxError) and
    returned ``len()`` of an unresolved name instead of the document count;
    the names are restored.
    """
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))
def UpperCamelCase(df: int, n: int, smoothing: bool = False) -> float:
    """Return the inverse document frequency ``log10(n / df)`` rounded to 3
    decimal places; with ``smoothing``, ``1 + log10(n / (1 + df))``.

    The corrupted original bound all parameters to the same name (a
    SyntaxError) while the body read ``df`` / ``n`` / ``smoothing``; the
    names are restored.

    Args:
        df: number of documents containing the term.
        n: total number of documents.
        smoothing: apply add-one smoothing to the document frequency.

    Raises:
        ValueError: if ``n`` is 0 (log10(0) is undefined).
        ZeroDivisionError: if ``df`` is 0 and smoothing is off.
    """
    # Local import: the file-level `from math import logaa` binding is a
    # corrupted name and fails; log10 is what the formula needs.
    from math import log10

    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)
def UpperCamelCase(tf: int, idf: float) -> float:
    """Return the tf-idf score ``tf * idf`` rounded to 3 decimal places.

    The corrupted original bound both parameters to the same name (a
    SyntaxError); distinct names are restored.
    """
    return round(tf * idf, 3)
| 714 |
from functools import lru_cache
@lru_cache
def UpperCamelCase(num: int) -> int:
    """Return ``num!``, memoized via ``lru_cache``.

    The corrupted original named its parameter ``lowercase_`` while the body
    read ``num``, and recursed through an undefined name ``factorial``; the
    parameter is restored and the recursion goes through the bound name.

    Raises:
        ValueError: if ``num`` is negative.
    """
    if num < 0:
        raise ValueError("""Number should not be negative.""")
    return 1 if num in (0, 1) else num * UpperCamelCase(num - 1)
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 64 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the DeBERTa subpackage: maps submodule name to
# the public names it provides. Optional backends (tokenizers / torch / tf)
# only register their entries when importable.
# NOTE(review): the corrupted source rebound this mapping wholesale (to `A_`)
# in every backend branch — discarding the earlier entries — and then passed
# an undefined `_import_structure` to `_LazyModule`; the conventional
# incremental-update pattern is restored here.
_import_structure = {
    'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
    'tokenization_deberta': ['DebertaTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_deberta_fast'] = ['DebertaTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_deberta'] = [
        'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'DebertaForMaskedLM',
        'DebertaForQuestionAnswering',
        'DebertaForSequenceClassification',
        'DebertaForTokenClassification',
        'DebertaModel',
        'DebertaPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_deberta'] = [
        'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFDebertaForMaskedLM',
        'TFDebertaForQuestionAnswering',
        'TFDebertaForSequenceClassification',
        'TFDebertaForTokenClassification',
        'TFDebertaModel',
        'TFDebertaPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )
else:
    import sys

    # At runtime the module is replaced by a lazy proxy that imports
    # submodules on first attribute access.
    # NOTE(review): upstream assigns this to ``sys.modules[__name__]``; the
    # corrupted source bound it to ``A_`` — kept as-is here, but confirm.
    A_ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 715 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class _a (datasets.BeamBasedBuilder ):
    """Dummy Beam-based dataset builder exposing a single string "content"
    column, fed from in-memory examples.

    NOTE(review): identifiers look machine-corrupted — all three methods are
    named `__A` (later defs shadow earlier ones), the last two bind both
    parameters to the same name `A__` (a SyntaxError), and the bodies read
    unbound names (`supervised_keys=A__`, `pipeline`). Upstream these are
    `_info`, `_split_generators(self, dl_manager, pipeline)` and
    `_build_pcollection(self, pipeline, examples)` — confirm.
    """

    def __A ( self ):
        # Dataset metadata: one string feature called "content".
        return datasets.DatasetInfo(
            features=datasets.Features({"""content""": datasets.Value("""string""" )} ) , supervised_keys=A__ , )
    def __A ( self , A__ , A__ ):
        # Single TRAIN split generated from the in-memory dummy examples.
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_dummy_examples()} )]
    def __A ( self , A__ , A__ ):
        # Build the Beam PCollection from the (key, example) pairs.
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(A__ )
class _a (datasets.BeamBasedBuilder ):
    """Dummy Beam-based builder with a nested feature: "a" is a Sequence
    containing a string field "b".

    NOTE(review): same machine corruption as the builder above — repeated
    method name `__A`, duplicate `A__` parameters (a SyntaxError) and unbound
    reads of `pipeline`; confirm against the upstream datasets beam tests.
    """

    def __A ( self ):
        # Dataset metadata: nested sequence feature a -> b (string).
        return datasets.DatasetInfo(
            features=datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) , supervised_keys=A__ , )
    def __A ( self , A__ , A__ ):
        # Single TRAIN split generated from the in-memory nested examples.
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_nested_examples()} )
        ]
    def __A ( self , A__ , A__ ):
        # Build the Beam PCollection from the (key, example) pairs.
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(A__ )
def UpperCamelCase() -> list:
    """Return the dummy (key, example) pairs for the flat beam builder:
    three rows, each a dict with a single string "content" field.

    The original annotated the return as ``Dict`` — a name never imported in
    this file (NameError at definition time) and wrong anyway, since the
    value is a list of tuples.
    """
    return [(i, {"content": content}) for i, content in enumerate(["""foo""", """bar""", """foobar"""] )]
def UpperCamelCase() -> list:
    """Return the dummy (key, example) pairs for the nested beam builder:
    each row nests the string as a one-element list under a -> b.

    The original annotated the return as ``Tuple`` — a name never imported in
    this file (NameError at definition time) and wrong anyway, since the
    value is a list of tuples.
    """
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["""foo""", """bar""", """foobar"""] )]
class _a (__magic_name__ ):
    """Tests for Beam-based dataset builders: end-to-end prepare, sharded
    parquet writes, missing beam options, and nested features.

    NOTE(review): heavily machine-corrupted — the base class
    `__magic_name__`, the builder names `DummyBeamDataset` /
    `NestedBeamDataset`, and most reads (`builder`, `dset`,
    `expected_num_examples`, `tmp_cache_dir` vs. the `A__` bindings) are
    unresolved under these names. Treat the assertions as documentation of
    intent, not runnable as-is.
    """

    @require_beam
    def __A ( self ):
        # End-to-end: prepare with the DirectRunner, then check the arrow
        # file, features, row counts and example contents.
        A__ : Dict = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            A__ : int = DummyBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
            A__ : int = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows , A__ )
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
            self.assertDictEqual(dset["""train"""][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset["""train"""][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
            del dset
    @require_beam
    def __A ( self ):
        # Sharded write: patch WriteToParquet to force num_shards=2 and check
        # shard files exist and all rows survive (order not preserved).
        import apache_beam as beam
        A__ : int = beam.io.parquetio.WriteToParquet
        A__ : List[str] = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            A__ : str = DummyBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
            with patch("""apache_beam.io.parquetio.WriteToParquet""" ) as write_parquet_mock:
                A__ : Optional[Any] = partial(A__ , num_shards=2 )
                builder.download_and_prepare()
                # NOTE(review): both existence checks below test shard 00000;
                # the second was presumably meant for 00001 — confirm upstream.
                self.assertTrue(
                    os.path.exists(
                        os.path.join(
                            A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
                self.assertTrue(
                    os.path.exists(
                        os.path.join(
                            A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
                self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
                A__ : Optional[int] = builder.as_dataset()
                self.assertEqual(dset["""train"""].num_rows , A__ )
                self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
                # Order is not preserved when sharding, so we just check that all the elements are there
                self.assertListEqual(sorted(dset["""train"""]["""content"""] ) , sorted(["""foo""", """bar""", """foobar"""] ) )
                self.assertTrue(
                    os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
                del dset
    @require_beam
    def __A ( self ):
        # Without a beam_runner, download_and_prepare raises MissingBeamOptions.
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            A__ : int = DummyBeamDataset(cache_dir=A__ )
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
    @require_beam
    def __A ( self ):
        # Same end-to-end check as the first test but with nested features.
        A__ : List[Any] = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            A__ : Optional[int] = NestedBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) )
            A__ : Optional[int] = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows , A__ )
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
            self.assertDictEqual(dset["""train"""][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset["""train"""][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
            del dset
| 64 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _a :
    """Functional scheduler state for the Flax DDPM scheduler: shared common
    tables, initial noise sigma, the timestep grid and an optional
    num_inference_steps.

    NOTE(review): all four fields are bound as `UpperCAmelCase__` (so only
    the last survives) and `create` binds three parameters to the same name
    `A__` (a SyntaxError) while passing field names (`common`,
    `init_noise_sigma`, `timesteps`) that do not exist under those names —
    upstream DDPMSchedulerState has distinct field names; confirm.
    """

    UpperCAmelCase__: CommonSchedulerState
    # setable values
    UpperCAmelCase__: jnp.ndarray
    UpperCAmelCase__: jnp.ndarray
    UpperCAmelCase__: Optional[int] = None
    @classmethod
    def __A ( cls , A__ , A__ , A__ ):
        # Convenience constructor mirroring the dataclass fields.
        return cls(common=A__ , init_noise_sigma=A__ , timesteps=A__ )
@dataclass
class _a (__magic_name__ ):
    """Scheduler step output carrying the updated DDPM scheduler state
    alongside the base output fields (prev_sample)."""

    UpperCAmelCase__: DDPMSchedulerState
class _a (__magic_name__ , __magic_name__ ):
    """Flax DDPM noise scheduler (denoising diffusion probabilistic models).

    The per-step math follows the DDPM formulation (Ho et al. 2020,
    https://arxiv.org/pdf/2006.11239.pdf, formulas (6), (7) and (15)).

    NOTE(review): machine-corrupted — both base classes are
    `__magic_name__`, `__init__` binds every parameter to the same name
    `A__` (a SyntaxError) while bodies read config names
    (`num_train_timesteps`, `variance_type`, ...), `jnp.floataa` is
    presumably `jnp.float32`, and most locals are bound to `A__` but read
    under their original names. Confirm against upstream diffusers
    `scheduling_ddpm_flax.py`.
    """

    UpperCAmelCase__: int = [e.name for e in FlaxKarrasDiffusionSchedulers]
    UpperCAmelCase__: jnp.dtype
    @property
    def __A ( self ):
        # has_state: this scheduler carries explicit functional state.
        return True
    @register_to_config
    def __init__( self , A__ = 1000 , A__ = 0.0_0_0_1 , A__ = 0.0_2 , A__ = "linear" , A__ = None , A__ = "fixed_small" , A__ = True , A__ = "epsilon" , A__ = jnp.floataa , ):
        # Only the dtype is stored directly; the rest is handled by
        # register_to_config.
        A__ : Union[str, Any] = dtype
    def __A ( self , A__ = None ):
        # create_state: build the initial scheduler state — common tables,
        # unit init-noise sigma, and a descending integer timestep grid.
        if common is None:
            A__ : List[str] = CommonSchedulerState.create(self )
        # standard deviation of the initial noise distribution
        A__ : Optional[Any] = jnp.array(1.0 , dtype=self.dtype )
        A__ : Optional[Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
        return DDPMSchedulerState.create(
            common=A__ , init_noise_sigma=A__ , timesteps=A__ , )
    def __A ( self , A__ , A__ , A__ = None ):
        # scale_model_input: DDPM applies no input scaling.
        return sample
    def __A ( self , A__ , A__ , A__ = () ):
        # set_timesteps: subsample the training timesteps for inference.
        A__ : Tuple = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        A__ : str = (jnp.arange(0 , A__ ) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=A__ , timesteps=A__ , )
    def __A ( self , A__ , A__ , A__=None , A__=None ):
        # _get_variance: posterior variance at timestep t under the
        # configured (or overridden) variance_type.
        A__ : Dict = state.common.alphas_cumprod[t]
        A__ : List[Any] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        A__ : Optional[int] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            A__ : int = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            A__ : Union[str, Any] = jnp.clip(A__ , a_min=1e-20 )
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            A__ : Optional[int] = jnp.log(jnp.clip(A__ , a_min=1e-20 ) )
        elif variance_type == "fixed_large":
            A__ : Dict = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            A__ : Optional[int] = jnp.log(state.common.betas[t] )
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            # Interpolate between min and max log-variance with the model's
            # predicted fraction (mapped from [-1, 1] to [0, 1]).
            A__ : Any = variance
            A__ : Optional[Any] = state.common.betas[t]
            A__ : Dict = (predicted_variance + 1) / 2
            A__ : Any = frac * max_log + (1 - frac) * min_log
        return variance
    def __A ( self , A__ , A__ , A__ , A__ , A__ = None , A__ = True , ):
        # step: one reverse-diffusion update x_t -> x_{t-1}.
        A__ : List[str] = timestep
        if key is None:
            A__ : Optional[int] = jax.random.PRNGKey(0 )
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            # Model predicts both noise and variance, stacked channel-wise.
            A__ : Tuple = jnp.split(A__ , sample.shape[1] , axis=1 )
        else:
            A__ : Union[str, Any] = None
        # 1. compute alphas, betas
        A__ : Dict = state.common.alphas_cumprod[t]
        A__ : Dict = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        A__ : List[Any] = 1 - alpha_prod_t
        A__ : List[str] = 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            A__ : Union[str, Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            A__ : List[Any] = model_output
        elif self.config.prediction_type == "v_prediction":
            A__ : Optional[Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
                """ for the FlaxDDPMScheduler.""" )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            A__ : List[str] = jnp.clip(A__ , -1 , 1 )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        A__ : Optional[int] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        A__ : int = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        A__ : Dict = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            # Sample the variance noise from a fresh PRNG split.
            A__ : str = jax.random.split(A__ , num=1 )
            A__ : List[Any] = jax.random.normal(A__ , shape=model_output.shape , dtype=self.dtype )
            return (self._get_variance(A__ , A__ , predicted_variance=A__ ) ** 0.5) * noise
        # No noise is added at t == 0 (final denoising step).
        A__ : int = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
        A__ : Optional[Any] = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=A__ , state=A__ )
    def __A ( self , A__ , A__ , A__ , A__ , ):
        # add_noise: forward-diffuse clean samples to the given timesteps.
        return add_noise_common(state.common , A__ , A__ , A__ )
    def __A ( self , A__ , A__ , A__ , A__ , ):
        # get_velocity: v-prediction training target.
        return get_velocity_common(state.common , A__ , A__ , A__ )
    def __len__( self ):
        # Length is the number of training timesteps.
        return self.config.num_train_timesteps
| 716 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
A_ : Union[str, Any] = logging.get_logger(__name__)
class _a (__magic_name__ ):
    """Deprecated feature-extractor alias for PoolFormer.

    Kept only for backward compatibility: constructing it emits a
    FutureWarning and defers entirely to the image-processor base class.
    """

    def __init__( self , *args , **kwargs ):
        # The corrupted source bound both the var-positional and var-keyword
        # parameters to the same name (a SyntaxError) and passed an
        # unresolved `A__` as the warning category; restored to the
        # conventional transformers deprecation pattern. Star-parameter
        # names are not part of the call interface, so this is caller-safe.
        warnings.warn(
            """The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use PoolFormerImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 64 | 0 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _a (__magic_name__ , unittest.TestCase ):
    """BPE tokenization tests for the CTRL tokenizer over a tiny toy vocab
    ("adapt react readapt apt").

    NOTE(review): machine-corrupted identifiers — the mixin base is
    `__magic_name__`, four methods share the name `__A` (later defs shadow
    earlier ones), and setUp binds locals to `A__` where later code reads
    `self.vocab_file`, `self.merges_file` and `self.special_tokens_map`
    (upstream these are instance attributes). Confirm against the upstream
    CTRL tokenization tests.
    """

    UpperCAmelCase__: int = CTRLTokenizer
    UpperCAmelCase__: int = False
    UpperCAmelCase__: Optional[Any] = False
    def __A ( self ):
        # setUp: write a toy vocab/merges pair to the test tmpdir.
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        A__ : List[Any] = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""]
        A__ : Union[str, Any] = dict(zip(A__ , range(len(A__ ) ) ) )
        A__ : Optional[int] = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""]
        A__ : str = {"""unk_token""": """<unk>"""}
        A__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        A__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(A__ ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(A__ ) )
    def __A ( self , **A__ ):
        # Factory: load a tokenizer bound to the toy files, with the test's
        # special-token map applied on top of any overrides.
        kwargs.update(self.special_tokens_map )
        return CTRLTokenizer.from_pretrained(self.tmpdirname , **A__ )
    def __A ( self , A__ ):
        # Input/expected-output pair consumed by the common tokenizer tests.
        A__ : Optional[int] = """adapt react readapt apt"""
        A__ : Tuple = """adapt react readapt apt"""
        return input_text, output_text
    def __A ( self ):
        # Full tokenize + convert_tokens_to_ids round trip over the toy vocab.
        A__ : Any = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        A__ : str = """adapt react readapt apt"""
        A__ : Optional[int] = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
        A__ : List[str] = tokenizer.tokenize(A__ )
        self.assertListEqual(A__ , A__ )
        A__ : List[str] = tokens + [tokenizer.unk_token]
        A__ : Any = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , A__ )
| 717 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
A_ : Any = logging.getLogger(__name__)
def UpperCamelCase(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Build (train, valid) DataLoaders over synthetic ``y = a*x + b + noise``
    data for the checkpointing tests.

    The corrupted original bound all five parameters to the same name (a
    SyntaxError) while the body read ``a``, ``b``, ``batch_size`` and
    ``n_batches``; names are restored from the defaults and body.

    Returns:
        ``(train_dataloader, valid_dataloader)`` with 10 and 2 batches by
        default.
    """

    def get_dataset(n_batches):
        # x ~ N(0, 1); targets follow the linear model with small noise.
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    # NOTE(review): the shuffle flags were unresolved placeholders in the
    # corrupted source; shuffling train and not valid is the conventional
    # choice — confirm against the upstream accelerate test utilities.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def UpperCamelCase(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Run a quick MSE training loop and return the per-step random draws
    (used by the checkpoint tests to compare RNG streams across save/load).

    The corrupted original bound all six parameters to the same name (a
    SyntaxError) while the body read ``model``, ``dataloader``, ``optimizer``,
    ``accelerator`` and ``scheduler``; names are restored.

    Returns:
        list of floats — one ``random.random()`` draw per optimizer step.
    """
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
            if scheduler is not None:
                scheduler.step()
    return rands
class _a (nn.Module ):
    """Minimal scalar linear model ``y = a * x + b`` with learnable a, b,
    used by the checkpointing tests."""

    def __init__( self ):
        super().__init__()
        # The corrupted source bound both parameters to a throwaway local;
        # they must be registered on ``self`` for the forward pass below.
        self.a = nn.Parameter(torch.randn(1 ) )
        self.b = nn.Parameter(torch.randn(1 ) )

    def __A ( self , A__ ):
        # Forward pass. The corrupted source read an unbound name ``x``; the
        # bound parameter (kept as ``A__`` to preserve the surrounding
        # file's call interface) is used instead.
        return A__ * self.a + self.b
class _a (unittest.TestCase ):
'''simple docstring'''
    def __A ( self ):
        # With ProjectConfiguration(total_limit=1) and automatic checkpoint
        # naming, saving state twice must keep only one checkpoint directory.
        # NOTE(review): the bare `A__` arguments (project_dir,
        # automatic_checkpoint_naming, the prepare() inputs) and the reads of
        # `model` / `dummy_dataloaders` are unresolved names in this
        # corrupted file — confirm against the upstream accelerate tests.
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : Optional[Any] = DummyModel()
            A__ : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ , A__ : str = dummy_dataloaders()
            A__ : Dict = ProjectConfiguration(total_limit=1 , project_dir=A__ , automatic_checkpoint_naming=A__ )
            # Train baseline
            A__ : List[str] = Accelerator(project_config=A__ )
            A__ , A__ , A__ , A__ : Any = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : str = DummyModel()
A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : int = dummy_dataloaders()
# Train baseline
A__ : str = Accelerator()
A__ , A__ , A__ , A__ : List[str] = accelerator.prepare(
A__ , A__ , A__ , A__ )
# Save initial
A__ : List[Any] = os.path.join(A__ , """initial""" )
accelerator.save_state(A__ )
((A__) , (A__)) : str = model.a.item(), model.b.item()
A__ : Dict = optimizer.state_dict()
A__ : List[str] = train(3 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : str = model.a.item(), model.b.item()
A__ : Any = optimizer.state_dict()
# Train partially
set_seed(42 )
A__ : Optional[int] = DummyModel()
A__ : Dict = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : Dict = dummy_dataloaders()
A__ : List[str] = Accelerator()
A__ , A__ , A__ , A__ : Optional[Any] = accelerator.prepare(
A__ , A__ , A__ , A__ )
accelerator.load_state(A__ )
((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
A__ : Union[str, Any] = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
A__ : List[str] = train(2 , A__ , A__ , A__ , A__ )
# Save everything
A__ : Optional[int] = os.path.join(A__ , """checkpoint""" )
accelerator.save_state(A__ )
# Load everything back in and make sure all states work
accelerator.load_state(A__ )
test_rands += train(1 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Union[str, Any] = model.a.item(), model.b.item()
A__ : Optional[int] = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : int = DummyModel()
A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : List[str] = dummy_dataloaders()
A__ : str = ProjectConfiguration(automatic_checkpoint_naming=A__ )
# Train baseline
A__ : Any = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ : str = accelerator.prepare(
A__ , A__ , A__ , A__ )
# Save initial
accelerator.save_state()
((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
A__ : int = optimizer.state_dict()
A__ : int = train(3 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Optional[Any] = model.a.item(), model.b.item()
A__ : Any = optimizer.state_dict()
# Train partially
set_seed(42 )
A__ : Dict = DummyModel()
A__ : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : Union[str, Any] = dummy_dataloaders()
A__ : List[Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A__ )
A__ : Dict = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
A__ , A__ , A__ , A__ )
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
A__ : Tuple = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
A__ : str = train(2 , A__ , A__ , A__ , A__ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_1""" ) )
test_rands += train(1 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
A__ : List[Any] = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
def __A ( self ):
A__ : Union[str, Any] = torch.tensor([1, 2, 3] )
A__ : int = torch.tensor([2, 3, 4] )
A__ : List[Any] = DummyModel()
A__ : List[Any] = torch.optim.Adam(net.parameters() )
A__ : Tuple = Accelerator()
with self.assertRaises(A__ ) as ve:
accelerator.register_for_checkpointing(A__ , A__ , A__ , A__ )
A__ : Any = str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Any = DummyModel()
A__ : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ : Dict = torch.optim.lr_scheduler.StepLR(A__ , step_size=1 , gamma=0.9_9 )
A__ , A__ : List[Any] = dummy_dataloaders()
A__ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=A__ )
# Train baseline
A__ : Optional[Any] = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# Save initial
accelerator.save_state()
A__ : Tuple = scheduler.state_dict()
train(3 , A__ , A__ , A__ , A__ , A__ )
self.assertNotEqual(A__ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
self.assertEqual(A__ , scheduler.state_dict() )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Optional[Any] = DummyModel()
A__ : int = ProjectConfiguration(automatic_checkpoint_naming=A__ , total_limit=2 )
# Train baseline
A__ : List[str] = Accelerator(project_dir=A__ , project_config=A__ )
A__ : Union[str, Any] = accelerator.prepare(A__ )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_10""" ) ) )
@require_cuda
def __A ( self ):
A__ : Dict = ["""torchrun""", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(A__ , env=os.environ.copy() )
if __name__ == "__main__":
    # Distributed smoke test executed under torchrun (see the @require_cuda
    # test above): trains briefly, saves a checkpoint, then reloads the
    # optimizer state onto CPU and onto the accelerator device.
    # NOTE(review): the module-level names were obfuscated to `A_`, yet the
    # code below references `savedir`, `model`, `optimizer`, `scheduler`,
    # `train_dataloader`, `valid_dataloader`, `project_config`, `accelerator`
    # and `param_device` — restore the original bindings before running.
    A_ : List[str] = '/tmp/accelerate/state_checkpointing'
    A_ : Optional[Any] = DummyModel()
    A_ : Union[str, Any] = torch.optim.Adam(params=model.parameters(), lr=1E-3)
    A_ : str = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    A_ , A_ : List[Any] = dummy_dataloaders()
    A_ : int = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    A_ : List[str] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    A_ , A_ , A_ , A_ , A_ : List[Any] = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    A_ , A_ : Dict = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the intial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        A_ : str = group['params'][0].device
        break
    assert param_device.type == accelerator.device.type
    A_ : Optional[Any] = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()
    # Check CPU state
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
    for group in optimizer.param_groups:
        A_ : str = group['params'][0].device
        break
    assert (
        param_device.type == torch.device('cpu').type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
    for group in optimizer.param_groups:
        A_ : Tuple = group['params'][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
    # Check error
    with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
        accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
| 64 | 0 |
import unittest
from knapsack import greedy_knapsack as kp
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
A__ : Optional[int] = [10, 20, 30, 40, 50, 60]
A__ : str = [2, 4, 6, 8, 10, 12]
A__ : Union[str, Any] = 100
self.assertEqual(kp.calc_profit(A__ , A__ , A__ ) , 210 )
def __A ( self ):
self.assertRaisesRegex(A__ , """max_weight must greater than zero.""" )
def __A ( self ):
self.assertRaisesRegex(A__ , """Weight can not be negative.""" )
def __A ( self ):
self.assertRaisesRegex(A__ , """Profit can not be negative.""" )
def __A ( self ):
self.assertRaisesRegex(A__ , """max_weight must greater than zero.""" )
def __A ( self ):
self.assertRaisesRegex(
A__ , """The length of profit and weight must be same.""" )
if __name__ == "__main__":
    # Run the knapsack test suite when this file is executed directly.
    unittest.main()
| 718 |
def UpperCamelCase (lowercase_: str , lowercase_: str ) -> bool:
A__ : Union[str, Any] = len(lowercase_ )
A__ : List[Any] = len(lowercase_ )
A__ : List[Any] = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
A__ : str = True
for i in range(lowercase_ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
A__ : int = True
if a[i].islower():
A__ : Dict = True
return dp[n][m]
if __name__ == "__main__":
    # Run the docstring examples above as tests.
    import doctest

    doctest.testmod()
| 64 | 0 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _a(__magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
    """Fast (CPU-capable) pipeline tests for ``StableUnCLIPPipeline``.

    NOTE(review): the mixin base classes were obfuscated to ``__magic_name__``
    (upstream: PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin,
    PipelineTesterMixin) and several locals to ``A__`` — the bodies below
    reference names such as ``embedder_hidden_size``,
    ``embedder_projection_dim`` and the component locals that no longer
    exist. Restore from upstream diffusers before running.
    """
    UpperCAmelCase__: Dict = StableUnCLIPPipeline
    UpperCAmelCase__: Any = TEXT_TO_IMAGE_PARAMS
    UpperCAmelCase__: List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
    UpperCAmelCase__: Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
    UpperCAmelCase__: str = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    UpperCAmelCase__: Union[str, Any] = False
    def __A ( self ):
        # Build the full set of tiny pipeline components (prior + denoiser)
        # used by the fast tests; every sub-model is deliberately small.
        A__ : Dict = 32
        A__ : Any = embedder_hidden_size
        # prior components
        torch.manual_seed(0 )
        A__ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        torch.manual_seed(0 )
        A__ : int = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=A__ , projection_dim=A__ , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
        torch.manual_seed(0 )
        A__ : List[Any] = PriorTransformer(
            num_attention_heads=2 , attention_head_dim=12 , embedding_dim=A__ , num_layers=1 , )
        torch.manual_seed(0 )
        A__ : Optional[int] = DDPMScheduler(
            variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=A__ , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , )
        # regular denoising components
        torch.manual_seed(0 )
        A__ : Dict = StableUnCLIPImageNormalizer(embedding_dim=A__ )
        A__ : int = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
        torch.manual_seed(0 )
        A__ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        torch.manual_seed(0 )
        A__ : List[str] = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=A__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
        torch.manual_seed(0 )
        A__ : Union[str, Any] = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=A__ , layers_per_block=1 , upcast_attention=A__ , use_linear_projection=A__ , )
        torch.manual_seed(0 )
        A__ : Union[str, Any] = DDIMScheduler(
            beta_schedule="""scaled_linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type="""v_prediction""" , set_alpha_to_one=A__ , steps_offset=1 , )
        torch.manual_seed(0 )
        A__ : List[Any] = AutoencoderKL()
        A__ : Union[str, Any] = {
            # prior components
            """prior_tokenizer""": prior_tokenizer,
            """prior_text_encoder""": prior_text_encoder,
            """prior""": prior,
            """prior_scheduler""": prior_scheduler,
            # image noising components
            """image_normalizer""": image_normalizer,
            """image_noising_scheduler""": image_noising_scheduler,
            # regular denoising components
            """tokenizer""": tokenizer,
            """text_encoder""": text_encoder,
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
        }
        return components
    def __A ( self , A__ , A__=0 ):
        # Build deterministic pipeline call kwargs for the given device/seed.
        # NOTE(review): the two parameters share the obfuscated name `A__`
        # (upstream: `device` and `seed`), which is a SyntaxError as written.
        if str(A__ ).startswith("""mps""" ):
            A__ : Optional[int] = torch.manual_seed(A__ )
        else:
            A__ : int = torch.Generator(device=A__ ).manual_seed(A__ )
        A__ : Dict = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """prior_num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs
    def __A ( self ):
        # Attention slicing must not change the output (strict only on CPU).
        A__ : int = torch_device == """cpu"""
        self._test_attention_slicing_forward_pass(test_max_difference=A__ )
    def __A ( self ):
        # Batched and single inference must agree (strict on cpu/mps).
        A__ : str = torch_device in ["""cpu""", """mps"""]
        self._test_inference_batch_single_identical(test_max_difference=A__ )
@slow
@require_torch_gpu
class _a(unittest.TestCase ):
    """Slow GPU integration tests for ``StableUnCLIPPipeline``."""
    def __A ( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __A ( self ):
        """Full fp16 generation must match a stored reference image."""
        A__ : Any = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )
        A__ : Optional[Any] = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
        pipe.to(A__ )
        pipe.set_progress_bar_config(disable=A__ )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        A__ : Optional[int] = torch.Generator(device="""cpu""" ).manual_seed(0 )
        A__ : List[str] = pipe("""anime turle""" , generator=A__ , output_type="""np""" )
        A__ : Tuple = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(A__ , A__ )
    def __A ( self ):
        """Peak GPU memory with offloading enabled must stay under 7 GB."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        A__ : Optional[int] = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
        A__ : List[Any] = pipe.to(A__ )
        pipe.set_progress_bar_config(disable=A__ )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        A__ : Any = pipe(
            """anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , )
        A__ : Tuple = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 719 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
# Module-level RNG shared by the random-float helpers below.
# NOTE(review): upstream this constant is named `global_rng`, which is the
# name the helper function below still references.
A_ : Dict = random.Random()
if is_torch_available():
    import torch
def UpperCamelCase(shape, scale=1.0, rng=None, name=None):
    """Create a ``shape[0] x shape[1]`` nested list of random floats in ``[0, scale)``.

    Args:
        shape: ``(rows, cols)`` pair.
        scale: multiplier applied to every draw.
        rng: optional ``random.Random``; falls back to the module-level RNG.
        name: unused; kept for signature compatibility with callers.
    """
    # Fix: the original signature repeated the parameter name `lowercase_`
    # (a SyntaxError); the real names (`shape`, `scale`, `rng`) are the ones
    # the body already references.
    if rng is None:
        rng = global_rng  # NOTE(review): module-level RNG (obfuscated to `A_` above).
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class _a (unittest.TestCase ):
'''simple docstring'''
def __init__( self , A__ , A__=7 , A__=400 , A__=2000 , A__=1 , A__=0.0 , A__=1_6000 , A__=True , A__=True , ):
A__ : Any = parent
A__ : Optional[int] = batch_size
A__ : Union[str, Any] = min_seq_length
A__ : Dict = max_seq_length
A__ : Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
A__ : str = feature_size
A__ : Optional[int] = padding_value
A__ : List[str] = sampling_rate
A__ : List[str] = return_attention_mask
A__ : int = do_normalize
def __A ( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __A ( self , A__=False , A__=False ):
def _flatten(A__ ):
return list(itertools.chain(*A__ ) )
if equal_length:
A__ : Dict = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
A__ : Union[str, Any] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
A__ : Optional[int] = [np.asarray(A__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _a (__magic_name__ , unittest.TestCase ):
    """Tests for ``ASTFeatureExtractor`` (batching, dtypes, reference values).

    NOTE(review): the mixin base was obfuscated to ``__magic_name__``
    (upstream: SequenceFeatureExtractionTestMixin) and the setUp method
    references ``ASTFeatureExtractionTester``, whose class name was also
    obfuscated above.
    """
    UpperCAmelCase__: int = ASTFeatureExtractor
    def __A ( self ):
        # setUp: create the shared tester/config helper.
        A__ : Optional[Any] = ASTFeatureExtractionTester(self )
    def __A ( self ):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        A__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        A__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        A__ : Optional[Any] = [np.asarray(A__ ) for speech_input in speech_inputs]
        # Test not batched input
        A__ : Tuple = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
        A__ : Tuple = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
        self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
        # Test batched
        A__ : Tuple = feat_extract(A__ , padding=A__ , return_tensors="""np""" ).input_values
        A__ : Tuple = feat_extract(A__ , padding=A__ , return_tensors="""np""" ).input_values
        for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
            self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        A__ : int = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        A__ : List[str] = np.asarray(A__ )
        A__ : Union[str, Any] = feat_extract(A__ , return_tensors="""np""" ).input_values
        A__ : Optional[Any] = feat_extract(A__ , return_tensors="""np""" ).input_values
        for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
            self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
    @require_torch
    def __A ( self ):
        # Padding must preserve float32 for both numpy and torch tensors.
        import torch
        A__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        A__ : Tuple = np.random.rand(100 ).astype(np.floataa )
        A__ : Tuple = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            A__ : List[str] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            A__ : Any = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
    def __A ( self , A__ ):
        # Load `num_samples` decoded audio arrays from the dummy LibriSpeech set.
        from datasets import load_dataset
        A__ : str = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
        # automatic decoding with librispeech
        A__ : str = ds.sort("""id""" ).select(range(A__ ) )[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]
    @require_torch
    def __A ( self ):
        # Reference-value check: first 30 filterbank values of sample 0.
        # fmt: off
        A__ : Optional[Any] = torch.tensor(
            [-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
            -1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
            -1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
            -0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] )
        # fmt: on
        A__ : Any = self._load_datasamples(1 )
        A__ : Tuple = ASTFeatureExtractor()
        A__ : Dict = feature_extractor(A__ , return_tensors="""pt""" ).input_values
        self.assertEquals(input_values.shape , (1, 1024, 128) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , A__ , atol=1e-4 ) )
| 64 | 0 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
# Make the repository's shared test modules importable before the custom
# config/feature-extractor imports below.
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402
# Fixture locations used by the tests below.
# NOTE(review): all three constants were obfuscated to the same name `A_`,
# so only the last binding survives (upstream: SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR,
# SAMPLE_FEATURE_EXTRACTION_CONFIG, SAMPLE_CONFIG).
A_ : Dict = get_tests_dir('fixtures')
A_ : List[str] = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
A_ : Dict = get_tests_dir('fixtures/dummy-config.json')
class _a (unittest.TestCase ):
    """Tests for ``AutoFeatureExtractor`` loading, registration and remote code.

    NOTE(review): every method is named ``__A`` after obfuscation, so only the
    last definition is bound on the class; locals obfuscated to ``A__`` leave
    several later references (e.g. in the registration tests) pointing at
    names that no longer exist.
    """
    def __A ( self ):
        # setUp placeholder (upstream initializes a counter).
        A__ : List[Any] = 0
    def __A ( self ):
        # Load a feature extractor from a hub model id.
        A__ : List[Any] = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
        self.assertIsInstance(A__ , A__ )
    def __A ( self ):
        # Load a feature extractor from a local directory of fixtures.
        A__ : Optional[int] = AutoFeatureExtractor.from_pretrained(A__ )
        self.assertIsInstance(A__ , A__ )
    def __A ( self ):
        # config.json alone (without feature_extractor_type) must be enough
        # to resolve the correct feature-extractor class.
        with tempfile.TemporaryDirectory() as tmpdirname:
            A__ : int = WavaVecaConfig()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            A__ : Any = AutoFeatureExtractor.from_pretrained(A__ ).to_dict()
            config_dict.pop("""feature_extractor_type""" )
            A__ : str = WavaVecaFeatureExtractor(**A__ )
            # save in new folder
            model_config.save_pretrained(A__ )
            config.save_pretrained(A__ )
            A__ : Optional[int] = AutoFeatureExtractor.from_pretrained(A__ )
            # make sure private variable is not incorrectly saved
            A__ : List[Any] = json.loads(config.to_json_string() )
            self.assertTrue("""_processor_class""" not in dict_as_saved )
            self.assertIsInstance(A__ , A__ )
    def __A ( self ):
        # Load directly from a preprocessor_config.json file path.
        A__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(A__ )
        self.assertIsInstance(A__ , A__ )
    def __A ( self ):
        # Invalid model identifiers must raise a helpful error.
        with self.assertRaisesRegex(
            A__ , """bert-base is not a local folder and is not a valid model identifier""" ):
            A__ : int = AutoFeatureExtractor.from_pretrained("""bert-base""" )
    def __A ( self ):
        # Invalid revisions must raise a helpful error.
        with self.assertRaisesRegex(
            A__ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            A__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(A__ , revision="""aaaaaa""" )
    def __A ( self ):
        # Repos without a preprocessor_config.json must raise a helpful error.
        with self.assertRaisesRegex(
            A__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
            A__ : Optional[int] = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
    def __A ( self ):
        # trust_remote_code gating: unset/False must fail, True must load and
        # the loaded extractor must survive a save/reload round trip.
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(A__ ):
            A__ : str = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(A__ ):
            A__ : Tuple = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=A__ )
        A__ : Optional[int] = AutoFeatureExtractor.from_pretrained(
            """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=A__ )
        self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(A__ )
            A__ : Any = AutoFeatureExtractor.from_pretrained(A__ , trust_remote_code=A__ )
        self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
    def __A ( self ):
        # Custom config/extractor registration; cleanup in finally so other
        # tests see pristine mappings even on failure.
        try:
            AutoConfig.register("""custom""" , A__ )
            AutoFeatureExtractor.register(A__ , A__ )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(A__ ):
                AutoFeatureExtractor.register(A__ , A__ )
            # Now that the config is registered, it can be used as any other config with the auto-API
            A__ : Dict = CustomFeatureExtractor.from_pretrained(A__ )
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(A__ )
                A__ : Optional[Any] = AutoFeatureExtractor.from_pretrained(A__ )
                self.assertIsInstance(A__ , A__ )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def __A ( self ):
        # When a local class is registered for a remote repo, trust_remote_code
        # selects between the local and hub implementations.
        class _a (__magic_name__ ):
            '''simple docstring'''
            UpperCAmelCase__: Dict = True
        try:
            AutoConfig.register("""custom""" , A__ )
            AutoFeatureExtractor.register(A__ , A__ )
            # If remote code is not set, the default is to use local
            A__ : Tuple = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" )
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
            self.assertTrue(feature_extractor.is_local )
            # If remote code is disabled, we load the local one.
            A__ : Optional[Any] = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=A__ )
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
            self.assertTrue(feature_extractor.is_local )
            # If remote is enabled, we load from the Hub
            A__ : Any = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=A__ )
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
            self.assertTrue(not hasattr(A__ , """is_local""" ) )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 720 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _a (__magic_name__ , __magic_name__ , __magic_name__ ):
    """GPT-2-based text decoder with a learned prefix (caption decoder).

    Embeds a CLIP-style prefix, optionally projecting it through a hidden
    bottleneck, prepends it to token embeddings, and decodes with beam search.

    NOTE(review): base classes were obfuscated to ``__magic_name__``
    (upstream: ModelMixin, ConfigMixin, ModuleUtilsMixin); ``__init__`` as
    written repeats the parameter name ``A__`` (a SyntaxError) — the real
    names can be read off the ``GPTaConfig(...)`` keyword arguments below.
    """
    UpperCAmelCase__: str = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
    @register_to_config
    def __init__( self , A__ , A__ , A__ = None , A__ = 5_0257 , A__ = 1024 , A__ = 768 , A__ = 12 , A__ = 12 , A__ = None , A__ = "gelu_new" , A__ = 0.1 , A__ = 0.1 , A__ = 0.1 , A__ = 1e-5 , A__ = 0.0_2 , A__ = True , A__ = True , A__ = False , A__ = False , ):
        super().__init__()
        A__ : Union[str, Any] = prefix_length
        # Require an explicit hidden dim whenever the prefix dim differs from
        # the transformer embedding dim (a projection is then mandatory).
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                F"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
                F""" `n_embd`: {n_embd} are not equal.""" )
        A__ : str = prefix_inner_dim
        A__ : Optional[Any] = prefix_hidden_dim
        # Encode/decode projections are identity when no hidden dim is set.
        A__ : Tuple = (
            nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        A__ : int = (
            nn.Linear(self.prefix_hidden_dim , A__ ) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        A__ : Tuple = GPTaConfig(
            vocab_size=A__ , n_positions=A__ , n_embd=A__ , n_layer=A__ , n_head=A__ , n_inner=A__ , activation_function=A__ , resid_pdrop=A__ , embd_pdrop=A__ , attn_pdrop=A__ , layer_norm_epsilon=A__ , initializer_range=A__ , scale_attn_weights=A__ , use_cache=A__ , scale_attn_by_inverse_layer_idx=A__ , reorder_and_upcast_attn=A__ , )
        A__ : int = GPTaLMHeadModel(A__ )
    def __A ( self , A__ , A__ , A__ = None , A__ = None , ):
        """Forward pass: prepend the projected prefix to the token embeddings
        and run GPT-2 (with dummy-token-padded labels when training)."""
        A__ : List[str] = self.transformer.transformer.wte(A__ )
        A__ : int = self.encode_prefix(A__ )
        A__ : int = self.decode_prefix(A__ )
        A__ : Optional[Any] = torch.cat((prefix_embeds, embedding_text) , dim=1 )
        if labels is not None:
            # Pad labels with zeros for the prefix positions so loss aligns.
            A__ : Any = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
            A__ : List[Any] = torch.cat((dummy_token, input_ids) , dim=1 )
        A__ : List[str] = self.transformer(inputs_embeds=A__ , labels=A__ , attention_mask=A__ )
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def __A ( self , A__ , A__ ):
        # Zero int64 tokens standing in for the prefix positions in `labels`.
        return torch.zeros(A__ , self.prefix_length , dtype=torch.intaa , device=A__ )
    def __A ( self , A__ ):
        # Project a raw prefix feature into the hidden prefix space.
        return self.encode_prefix(A__ )
    @torch.no_grad()
    def __A ( self , A__ , A__ , A__ ):
        """Generate captions for a batch of prefix features, one at a time,
        via beam search; returns stacked tokens and sequence lengths."""
        A__ : List[Any] = torch.split(A__ , 1 , dim=0 )
        A__ : Optional[int] = []
        A__ : str = []
        for feature in features:
            A__ : Dict = self.decode_prefix(feature.to(A__ ) )  # back to the clip feature
            # Only support beam search for now
            A__ , A__ : Union[str, Any] = self.generate_beam(
                input_embeds=A__ , device=A__ , eos_token_id=A__ )
            generated_tokens.append(output_tokens[0] )
            generated_seq_lengths.append(seq_lengths[0] )
        A__ : int = torch.stack(A__ )
        A__ : List[Any] = torch.stack(A__ )
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def __A ( self , A__=None , A__=None , A__=None , A__ = 5 , A__ = 67 , A__ = 1.0 , A__ = None , ):
        """Beam search over the transformer given prefix embeddings.

        NOTE(review): the working locals (`tokens`, `scores`, `seq_lengths`,
        `is_stopped`, `generated`, ...) were obfuscated to ``A__`` while
        later lines still reference the original names — restore from the
        upstream UniDiffuser text decoder before running.
        """
        A__ : Any = eos_token_id
        A__ : Any = None
        A__ : Optional[int] = None
        A__ : Optional[Any] = torch.ones(A__ , device=A__ , dtype=torch.int )
        A__ : Any = torch.zeros(A__ , device=A__ , dtype=torch.bool )
        if input_embeds is not None:
            A__ : Dict = input_embeds
        else:
            A__ : str = self.transformer.transformer.wte(A__ )
        for i in range(A__ ):
            A__ : Dict = self.transformer(inputs_embeds=A__ )
            A__ : str = outputs.logits
            # Temperature-scaled log-softmax over the final position.
            A__ : Union[str, Any] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            A__ : Any = logits.softmax(-1 ).log()
            if scores is None:
                # First step: expand to `beam_size` candidates.
                A__ , A__ : Optional[int] = logits.topk(A__ , -1 )
                A__ : List[Any] = generated.expand(A__ , *generated.shape[1:] )
                A__ , A__ : List[Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
                if tokens is None:
                    A__ : Optional[Any] = next_tokens
                else:
                    A__ : List[Any] = tokens.expand(A__ , *tokens.shape[1:] )
                    A__ : int = torch.cat((tokens, next_tokens) , dim=1 )
            else:
                # Subsequent steps: keep finished beams frozen and rank all
                # continuations by length-normalized cumulative score.
                A__ : Optional[int] = -float(np.inf )
                A__ : List[Any] = 0
                A__ : str = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                A__ : Dict = scores_sum / seq_lengths[:, None]
                A__ , A__ : List[Any] = scores_sum_average.view(-1 ).topk(A__ , -1 )
                A__ : Tuple = next_tokens // scores_sum.shape[1]
                A__ : Optional[Any] = seq_lengths[next_tokens_source]
                A__ : List[str] = next_tokens % scores_sum.shape[1]
                A__ : Optional[int] = next_tokens.unsqueeze(1 )
                A__ : int = tokens[next_tokens_source]
                A__ : List[Any] = torch.cat((tokens, next_tokens) , dim=1 )
                A__ : str = generated[next_tokens_source]
                A__ : Optional[Any] = scores_sum_average * seq_lengths
                A__ : Union[str, Any] = is_stopped[next_tokens_source]
            # Append the embedding of the chosen tokens and update stop flags.
            A__ : str = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
            A__ : Optional[int] = torch.cat((generated, next_token_embed) , dim=1 )
            A__ : List[str] = is_stopped + next_tokens.eq(A__ ).squeeze()
            if is_stopped.all():
                break
        # Order beams by final length-normalized score, best first.
        A__ : Dict = scores / seq_lengths
        A__ : Dict = scores.argsort(descending=A__ )
        # tokens tensors are already padded to max_seq_length
        A__ : Union[str, Any] = [tokens[i] for i in order]
        A__ : Any = torch.stack(A__ , dim=0 )
        A__ : Dict = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
        return output_texts, seq_lengths
| 64 | 0 |
def UpperCamelCase (height: int , from_pole: str , to_pole: str , with_pole: str ) -> None:
    """Recursively move ``height`` disks from ``from_pole`` to ``to_pole``.

    Classic Tower of Hanoi: ``with_pole`` is the spare peg. Fix: the original
    signature declared all four parameters under the same name (a SyntaxError),
    so the parameter names are restored from the body's usage.
    Relies on the sibling helpers `move_tower` / `move_disk` defined elsewhere.
    """
    # Base case is height == 0: nothing to move.
    if height >= 1:
        # Park the top height-1 disks on the spare peg.
        move_tower(height - 1 , from_pole , with_pole , to_pole )
        # Move the largest disk straight to the target peg.
        move_disk(from_pole , to_pole )
        # Bring the parked disks onto the target peg.
        move_tower(height - 1 , with_pole , to_pole , from_pole )
def UpperCamelCase (from_pole: str , to_pole: str ) -> None:
    """Print a single disk move from ``from_pole`` to ``to_pole``.

    Fix: both parameters were declared under the same name (a SyntaxError) and
    the return annotation claimed ``int`` although nothing is returned.
    """
    print("""moving disk from""" , from_pole , """to""" , to_pole )
def UpperCamelCase () -> None:
    """Prompt for a tower height and run the Tower of Hanoi solver.

    Fix: the height was stored in a scrambled local but passed on as the
    undefined name ``lowercase_`` (NameError); it is now a single local.
    """
    height = int(input("""Height of hanoi: """ ).strip() )
    # Solve for the full tower using the pegs named in the original call.
    move_tower(height , """A""" , """B""" , """C""" )
if __name__ == "__main__":
    # Fix: `main()` is not defined anywhere in this file — the entry-point
    # function directly above was renamed `UpperCamelCase`, so call that.
    UpperCamelCase()
| 721 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
A_ : Tuple = datasets.utils.logging.get_logger(__name__)
@dataclass
class _a (datasets.BuilderConfig ):
    """BuilderConfig for the JSON loader.

    Fix: all eight fields were declared under the single name
    ``UpperCAmelCase__``, so seven of them were silently discarded by the
    dataclass machinery. The field names are restored from the attributes the
    builder class below actually reads (``self.config.features``,
    ``self.config.encoding`` , ``self.config.encoding_errors``,
    ``self.config.field``, ``self.config.use_threads``,
    ``self.config.block_size``, ``self.config.chunksize``,
    ``self.config.newlines_in_values``).
    """

    # Optional schema the parsed tables are cast to.
    features: Optional[datasets.Features] = None
    # Text encoding of the input files.
    encoding: str = "utf-8"
    # Error handler passed to `open(...)` / `bytes.decode(...)`.
    encoding_errors: Optional[str] = None
    # When set, only this top-level field of each JSON document is loaded.
    field: Optional[str] = None
    use_threads: bool = True # deprecated
    block_size: Optional[int] = None # deprecated
    chunksize: int = 10 << 20 # 10MB
    newlines_in_values: Optional[bool] = None
class _a (datasets.ArrowBasedBuilder ):
    """Arrow-based builder that reads JSON and JSON-Lines files into Arrow tables.

    NOTE(review): local names in this class were mechanically renamed to `A__`,
    so several assignments never reach the names read afterwards (e.g. `files`,
    `splits`, `dataset`, `keys`, `batch`) — verify against the upstream
    `datasets` packaged JSON module before relying on this copy. Also note
    successive `def __A` definitions shadow one another after name mangling,
    so only the last method body survives at runtime.
    """
    # Builder configuration class (upstream: BUILDER_CONFIG_CLASS).
    UpperCAmelCase__: List[str] = JsonConfig
    def __A ( self ):
        # _info(): validate deprecated/removed config options, return DatasetInfo.
        if self.config.block_size is not None:
            logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
            # Deprecated `block_size` is meant to override `chunksize` here.
            A__ : Union[str, Any] = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                """The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" )
        if self.config.newlines_in_values is not None:
            raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
        return datasets.DatasetInfo(features=self.config.features )
    def __A ( self , A__ ):
        # _split_generators(dl_manager): resolve `data_files` into SplitGenerators.
        if not self.config.data_files:
            raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        A__ : int = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(A__ , (str, list, tuple) ):
            # A bare file or list of files maps to a single TRAIN split.
            A__ : Optional[Any] = data_files
            if isinstance(A__ , A__ ):
                A__ : List[str] = [files]
            A__ : int = [dl_manager.iter_files(A__ ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
        A__ : List[str] = []
        for split_name, files in data_files.items():
            if isinstance(A__ , A__ ):
                A__ : Optional[int] = [files]
            A__ : Optional[int] = [dl_manager.iter_files(A__ ) for file in files]
            splits.append(datasets.SplitGenerator(name=A__ , gen_kwargs={"""files""": files} ) )
        return splits
    def __A ( self , A__ ):
        # _cast_table(pa_table): align the Arrow table with the requested features.
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                A__ : Optional[Any] = self.config.features.arrow_schema.field(A__ ).type
                A__ : str = pa_table.append_column(A__ , pa.array([None] * len(A__ ) , type=A__ ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            A__ : Optional[int] = table_cast(A__ , self.config.features.arrow_schema )
        return pa_table
    def __A ( self , A__ ):
        # _generate_tables(files): yield (key, Arrow table) pairs from each file.
        for file_idx, file in enumerate(itertools.chain.from_iterable(A__ ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(A__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                    A__ : Optional[Any] = json.load(A__ )
                # We keep only the field we are interested in
                A__ : Optional[int] = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(A__ , (list, tuple) ):
                    A__ : Union[str, Any] = set().union(*[row.keys() for row in dataset] )
                    A__ : Any = {col: [row.get(A__ ) for row in dataset] for col in keys}
                else:
                    A__ : Any = dataset
                A__ : Any = pa.Table.from_pydict(A__ )
                yield file_idx, self._cast_table(A__ )
            # If the file has one json object per line
            else:
                with open(A__ , """rb""" ) as f:
                    A__ : List[str] = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    A__ : List[str] = max(self.config.chunksize // 32 , 16 << 10 )
                    A__ : Any = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
                    )
                    while True:
                        A__ : Dict = f.read(self.config.chunksize )
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(A__ )
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            A__ : List[Any] = batch.decode(self.config.encoding , errors=A__ ).encode("""utf-8""" )
                        try:
                            while True:
                                try:
                                    A__ : str = paj.read_json(
                                        io.BytesIO(A__ ) , read_options=paj.ReadOptions(block_size=A__ ) )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(A__ , pa.ArrowInvalid )
                                        and "straddling" not in str(A__ )
                                        or block_size > len(A__ )
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            F"""Batch of {len(A__ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    A__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                                    A__ : Optional[Any] = json.load(A__ )
                            except json.JSONDecodeError:
                                logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(A__ , A__ ): # list is the only sequence type supported in JSON
                                try:
                                    A__ : str = set().union(*[row.keys() for row in dataset] )
                                    A__ : List[str] = {col: [row.get(A__ ) for row in dataset] for col in keys}
                                    A__ : int = pa.Table.from_pydict(A__ )
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
                                    raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
                                yield file_idx, self._cast_table(A__ )
                                break
                            else:
                                logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
                                raise ValueError(
                                    F"""Not able to read records in the JSON file at {file}. """
                                    F"""You should probably indicate the field of the JSON file containing your records. """
                                    F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
                                    F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(A__ )
                        batch_idx += 1
| 64 | 0 |
import inspect
import unittest
class _a (unittest.TestCase ):
    """Checks that `diffusers` is importable and that every backend referenced
    by its dummy objects has an entry in the dependency-versions table."""
    def __A ( self ):
        # `diffusers` must be importable; fail loudly otherwise.
        try:
            import diffusers # noqa: F401
        except ImportError:
            assert False
    def __A ( self ):
        import diffusers
        from diffusers.dependency_versions_table import deps
        # Fix: inspect the imported `diffusers` module and bind the result to the
        # name iterated below (the original referenced undefined names here,
        # raising NameError).
        all_classes = inspect.getmembers(diffusers , inspect.isclass )
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    # Map module names to the pip package names used in `deps`.
                    if backend == "k_diffusion":
                        backend = """k-diffusion"""
                    elif backend == "invisible_watermark":
                        backend = """invisible-watermark"""
                    assert backend in deps, F"""{backend} is not in the deps table!"""
| 700 |
import os
# Prefer the package-relative import; fall back to a flat import when run as a
# plain script from the repository root.
try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths # type: ignore
# Fix: every list below was assigned to the scrambled name `A_` but then read
# under its real name, so the script raised NameError on the first check.
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
# Files whose paths contain uppercase characters.
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f'''{len(upper_files)} files contain uppercase characters:''')
    print('\n'.join(upper_files) + '\n')
# Files whose paths contain spaces.
space_files = [file for file in filepaths if ' ' in file]
if space_files:
    print(f'''{len(space_files)} files contain space characters:''')
    print('\n'.join(space_files) + '\n')
# Files whose paths contain hyphens.
hyphen_files = [file for file in filepaths if '-' in file]
if hyphen_files:
    print(f'''{len(hyphen_files)} files contain hyphen characters:''')
    print('\n'.join(hyphen_files) + '\n')
# Files sitting at the repository root rather than inside a directory.
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f'''{len(nodir_files)} files are not in a directory:''')
    print('\n'.join(nodir_files) + '\n')
# Exit non-zero with the total number of offending files so CI fails.
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys
    sys.exit(bad_files)
| 64 | 0 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
# Non-default values for (nearly) every `PretrainedConfig` kwarg. Fix: the dict
# was bound to the scrambled name `A_` while the test class below reads
# `config_common_kwargs`, which raised NameError — restore the read name.
config_common_kwargs = {
    'return_dict': False,
    'output_hidden_states': True,
    'output_attentions': True,
    'torchscript': True,
    'torch_dtype': 'float16',
    'use_bfloat16': True,
    'tf_legacy_loss': True,
    'pruned_heads': {'a': 1},
    'tie_word_embeddings': False,
    'is_decoder': True,
    'cross_attention_hidden_size': 128,
    'add_cross_attention': True,
    'tie_encoder_decoder': True,
    'max_length': 50,
    'min_length': 3,
    'do_sample': True,
    'early_stopping': True,
    'num_beams': 3,
    'num_beam_groups': 3,
    'diversity_penalty': 0.5,
    'temperature': 2.0,
    'top_k': 10,
    'top_p': 0.7,
    'typical_p': 0.2,
    'repetition_penalty': 0.8,
    'length_penalty': 0.8,
    'no_repeat_ngram_size': 5,
    'encoder_no_repeat_ngram_size': 5,
    'bad_words_ids': [1, 2, 3],
    'num_return_sequences': 3,
    'chunk_size_feed_forward': 5,
    'output_scores': True,
    'return_dict_in_generate': True,
    'forced_bos_token_id': 2,
    'forced_eos_token_id': 3,
    'remove_invalid_values': True,
    'architectures': ['BertModel'],
    'finetuning_task': 'translation',
    'id2label': {0: 'label'},
    'label2id': {'label': '0'},
    'tokenizer_class': 'BertTokenizerFast',
    'prefix': 'prefix',
    'bos_token_id': 6,
    'pad_token_id': 7,
    'eos_token_id': 8,
    'sep_token_id': 9,
    'decoder_start_token_id': 10,
    'exponential_decay_length_penalty': (5, 1.01),
    'suppress_tokens': [0, 1],
    'begin_suppress_tokens': 2,
    'task_specific_params': {'translation': 'some_params'},
    'problem_type': 'regression',
}
@is_staging_test
class _a (unittest.TestCase ):
    """Staging-Hub integration tests: pushing configs via `push_to_hub` and via
    `save_pretrained(..., push_to_hub=True)`, plus dynamic custom-code configs.

    NOTE(review): locals were scrambled to `A__`, so values assigned here never
    reach the names read later (e.g. `config`) — verify against the upstream
    transformers test before running. Also note that after name mangling the
    repeated `def __A` methods shadow one another; only the last survives.
    """
    @classmethod
    def __A ( cls ):
        # setUpClass: log the shared test token into the local HF folder.
        A__ : List[str] = TOKEN
        HfFolder.save_token(A__ )
    @classmethod
    def __A ( cls ):
        # tearDownClass: best-effort deletion of repos the tests may have created.
        try:
            delete_repo(token=cls._token , repo_id="""test-config""" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="""valid_org/test-config-org""" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="""test-dynamic-config""" )
        except HTTPError:
            pass
    def __A ( self ):
        # Round-trip a config through the user namespace on the Hub.
        A__ : int = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        config.push_to_hub("""test-config""" , use_auth_token=self._token )
        A__ : Optional[Any] = BertConfig.from_pretrained(F"""{USER}/test-config""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(A__ , getattr(A__ , A__ ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="""test-config""" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(A__ , repo_id="""test-config""" , push_to_hub=A__ , use_auth_token=self._token )
        A__ : str = BertConfig.from_pretrained(F"""{USER}/test-config""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(A__ , getattr(A__ , A__ ) )
    def __A ( self ):
        # Same round-trip, but through an organization namespace.
        A__ : Union[str, Any] = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        config.push_to_hub("""valid_org/test-config-org""" , use_auth_token=self._token )
        A__ : Optional[Any] = BertConfig.from_pretrained("""valid_org/test-config-org""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(A__ , getattr(A__ , A__ ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="""valid_org/test-config-org""" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                A__ , repo_id="""valid_org/test-config-org""" , push_to_hub=A__ , use_auth_token=self._token )
        A__ : str = BertConfig.from_pretrained("""valid_org/test-config-org""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(A__ , getattr(A__ , A__ ) )
    def __A ( self ):
        # Dynamic config with custom code: auto_map must be written and the
        # config must load back with trust_remote_code.
        CustomConfig.register_for_auto_class()
        A__ : Optional[Any] = CustomConfig(attribute=42 )
        config.push_to_hub("""test-dynamic-config""" , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map , {"""AutoConfig""": """custom_configuration.CustomConfig"""} )
        A__ : Optional[int] = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" , trust_remote_code=A__ )
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__ , """CustomConfig""" )
        self.assertEqual(new_config.attribute , 42 )
class _a (unittest.TestCase ):
    """Unit tests for `PretrainedConfig` utilities: `update_from_string`,
    kwarg coverage, subfolder loading, offline fallback, legacy URL loading,
    and version-gated configuration files.

    NOTE(review): locals were scrambled to `A__`, so several assignments never
    reach the names read afterwards (e.g. `c`, `base_config`, `configuration`)
    — verify against the upstream transformers test before running. After name
    mangling the repeated `def __A` methods shadow one another as well.
    """
    def __A ( self ):
        # update_from_string must round-trip int/float/bool/str values.
        A__ : Any = GPTaConfig()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        A__ : Optional[int] = c.n_embd + 1 # int
        A__ : List[str] = c.resid_pdrop + 1.0 # float
        A__ : Union[str, Any] = not c.scale_attn_weights # bool
        A__ : Optional[Any] = c.summary_type + """foo""" # str
        c.update_from_string(
            F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
        self.assertEqual(A__ , c.n_embd , """mismatch for key: n_embd""" )
        self.assertEqual(A__ , c.resid_pdrop , """mismatch for key: resid_pdrop""" )
        self.assertEqual(A__ , c.scale_attn_weights , """mismatch for key: scale_attn_weights""" )
        self.assertEqual(A__ , c.summary_type , """mismatch for key: summary_type""" )
    def __A ( self ):
        # `config_common_kwargs` must set a non-default value for every
        # PretrainedConfig kwarg except the listed bookkeeping attributes.
        A__ : Optional[Any] = PretrainedConfig()
        A__ : Tuple = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to addin config_common_kwargs above.
        self.assertListEqual(
            A__ , ["""is_encoder_decoder""", """_name_or_path""", """_commit_hash""", """transformers_version"""] )
        A__ : int = [key for key, value in config_common_kwargs.items() if value == getattr(A__ , A__ )]
        if len(A__ ) > 0:
            raise ValueError(
                """The following keys are set with the default values in"""
                """ `test_configuration_common.config_common_kwargs` pick another value for them:"""
                F""" {', '.join(A__ )}.""" )
    def __A ( self ):
        # Loading from a repo subfolder requires the `subfolder` argument.
        with self.assertRaises(A__ ):
            # config is in subfolder, the following should not work without specifying the subfolder
            A__ : Any = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" )
        A__ : int = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" , subfolder="""bert""" )
        self.assertIsNotNone(A__ )
    def __A ( self ):
        # A mock response for an HTTP head request to emulate server down
        A__ : Tuple = mock.Mock()
        A__ : Tuple = 500
        A__ : Tuple = {}
        A__ : Dict = HTTPError
        A__ : Optional[Any] = {}
        # Download this model to make sure it's in the cache.
        A__ : List[str] = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("""requests.Session.request""" , return_value=A__ ) as mock_head:
            A__ : int = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
            # This check we did call the fake head request
            mock_head.assert_called()
    def __A ( self ):
        # This test is for deprecated behavior and can be removed in v5
        A__ : List[Any] = BertConfig.from_pretrained(
            """https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json""" )
    def __A ( self ):
        # Version-gated configuration files: the loader must pick the newest
        # config file whose version gate is satisfied by the running version.
        A__ : Any = AutoConfig.from_pretrained("""bert-base-cased""" )
        A__ : str = ["""config.4.0.0.json"""]
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(A__ )
            A__ : List[str] = 2
            json.dump(configuration.to_dict() , open(os.path.join(A__ , """config.4.0.0.json""" ) , """w""" ) )
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            A__ : Optional[int] = AutoConfig.from_pretrained(A__ )
            self.assertEqual(new_configuration.hidden_size , 2 )
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            A__ : Dict = ["""config.42.0.0.json"""]
            A__ : Any = 768
            configuration.save_pretrained(A__ )
            shutil.move(os.path.join(A__ , """config.4.0.0.json""" ) , os.path.join(A__ , """config.42.0.0.json""" ) )
            A__ : List[Any] = AutoConfig.from_pretrained(A__ )
            self.assertEqual(new_configuration.hidden_size , 768 )
    def __A ( self ):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        A__ : List[Any] = """hf-internal-testing/test-two-configs"""
        import transformers as new_transformers
        A__ : Union[str, Any] = """v4.0.0"""
        A__ : List[Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
            A__ , return_unused_kwargs=A__ )
        self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` ia not kept in the kwargs by mistake.
        self.assertDictEqual(A__ , {} )
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers
        A__ : Tuple = """v3.0.0"""
        A__ : str = old_transformers.models.auto.AutoConfig.from_pretrained(A__ )
        self.assertEqual(old_configuration.hidden_size , 768 )
| 701 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def UpperCamelCase (*args , take_from: Optional[Union[Dict, Any]] = None , standard_warn: bool = True , stacklevel: int = 2 ):
    """Deprecation helper.

    Each positional argument is an ``(attribute, version_name, message)`` tuple.
    For every tuple, warn that ``attribute`` is deprecated; when ``take_from``
    is a kwargs dict, pop the deprecated key from it, and when it is an object,
    read the deprecated attribute. Returns nothing, the single popped value, or
    a tuple of popped values.

    Fixes: the original declared all four keyword parameters under the name
    ``lowercase_`` (a SyntaxError — duplicate argument) while the body read
    ``args``/``deprecated_kwargs``/``standard_warn``; several other references
    inside the body used the undefined ``lowercase_``; and the TypeError
    message contained the placeholder "(unknown)" instead of the caller's
    filename.

    Raises:
        ValueError: if the running version already exceeds ``version_name``
            (the deprecation shim should have been removed).
        TypeError: if unexpected keys remain in a ``take_from`` dict.
    """
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    # A single bare triple may be passed un-nested; normalize to a tuple of tuples.
    if not isinstance(args[0] , tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                f"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
                f""" version {__version__} is >= {version_name}""" )
        warning = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = f"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
            warning = f"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
        elif deprecated_kwargs is None:
            warning = f"""`{attribute}` is deprecated and will be removed in version {version_name}."""
        if warning is not None:
            warning = warning + """ """ if standard_warn else """"""
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )
    # Leftover keys in a dict `take_from` mean the caller passed an unknown kwarg.
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(f"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
| 64 | 0 |
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class _a (__magic_name__ ):
    """Processor pairing an MCTCT feature extractor with a tokenizer.

    NOTE(review): the base-class name `__magic_name__` is a scrambling artifact
    (upstream this derives from ProcessorMixin), and locals were renamed to
    `A__`, so assignments never reach the names read later (e.g. `audio`,
    `text`, `kwargs`) — verify against the upstream transformers processor.
    The repeated `def __A` methods also shadow one another after name mangling.
    """
    # Names of the component classes ProcessorMixin instantiates.
    UpperCAmelCase__: str = '''MCTCTFeatureExtractor'''
    UpperCAmelCase__: Any = '''AutoTokenizer'''
    def __init__( self , A__ , A__ ):
        super().__init__(A__ , A__ )
        # Default processor used while not inside `as_target_processor`.
        A__ : str = self.feature_extractor
        A__ : Optional[Any] = False
    def __call__( self , *A__ , **A__ ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*A__ , **A__ )
        if "raw_speech" in kwargs:
            warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
            A__ : int = kwargs.pop("""raw_speech""" )
        else:
            A__ : Union[str, Any] = kwargs.pop("""audio""" , A__ )
        A__ : Dict = kwargs.pop("""sampling_rate""" , A__ )
        A__ : Union[str, Any] = kwargs.pop("""text""" , A__ )
        # Positional arguments: first is audio, the rest are extractor args.
        if len(A__ ) > 0:
            A__ : List[str] = args[0]
            A__ : Optional[int] = args[1:]
        if audio is None and text is None:
            raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
        if audio is not None:
            A__ : List[Any] = self.feature_extractor(A__ , *A__ , sampling_rate=A__ , **A__ )
        if text is not None:
            A__ : Optional[Any] = self.tokenizer(A__ , **A__ )
        # Return audio features, token encodings, or the merged dict.
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            A__ : Optional[int] = encodings["""input_ids"""]
            return inputs
    def __A ( self , *A__ , **A__ ):
        # Delegate batch decoding to the tokenizer.
        return self.tokenizer.batch_decode(*A__ , **A__ )
    def __A ( self , *A__ , **A__ ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*A__ , **A__ )
        A__ : List[str] = kwargs.pop("""input_features""" , A__ )
        A__ : Union[str, Any] = kwargs.pop("""labels""" , A__ )
        if len(A__ ) > 0:
            A__ : int = args[0]
            A__ : Any = args[1:]
        if input_features is not None:
            A__ : Optional[int] = self.feature_extractor.pad(A__ , *A__ , **A__ )
        if labels is not None:
            A__ : Optional[Any] = self.tokenizer.pad(A__ , **A__ )
        # Return padded features, padded labels, or the merged dict.
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            A__ : Dict = labels["""input_ids"""]
            return input_features
    def __A ( self , *A__ , **A__ ):
        # Delegate single-sequence decoding to the tokenizer.
        return self.tokenizer.decode(*A__ , **A__ )
    @contextmanager
    def __A ( self ):
        # Deprecated target-processing context: temporarily swap the active
        # processor to the tokenizer, restoring the feature extractor on exit.
        warnings.warn(
            """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
            """labels by using the argument `text` of the regular `__call__` method (either in the same call as """
            """your audio inputs, or in a separate call.""" )
        A__ : Optional[Any] = True
        A__ : Union[str, Any] = self.tokenizer
        yield
        A__ : Optional[Any] = self.feature_extractor
        A__ : Optional[Any] = False
| 702 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def UpperCamelCase (flax_key_tuple , flax_tensor ):
    """Rename one flax parameter key/tensor pair to its PyTorch equivalent.

    - 3-D ``kernel`` (expert layer): rename to ``weight`` and permute to
      (experts, out, in).
    - 2-D ``kernel`` (linear layer): rename to ``weight`` and transpose.
    - ``scale`` / ``embedding``: rename to ``weight`` unchanged.

    Fixes: both parameters were declared under the same name (a SyntaxError)
    and two references used the undefined ``lowercase_`` (NameError).

    Returns the (possibly renamed) key tuple and the converted tensor.
    """
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
        flax_tensor = torch.permute(flax_tensor , (0, 2, 1) )
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
    return flax_key_tuple, flax_tensor
def UpperCamelCase (layer , checkpoint_info , switch_checkpoint_path ):
    """Split a flattened tensorstore checkpoint key into its parts.

    Fixes: all three parameters were declared under the same name (a
    SyntaxError — duplicate argument); the names are restored from the body's
    reads (``layer``, ``checkpoint_info``, ``switch_checkpoint_path``).

    Returns:
        (curr_real_layer_name, split_layer, content) where `split_layer` is the
        trailing key path and `content` is the value to store for it —
        an absolute file path for `kvstore/path` keys, the literal "file" for
        `kvstore/driver` keys, otherwise the raw checkpoint value.
    """
    if "metadata" in layer:
        A__ = layer.split("""metadata""" )
        curr_real_layer_name = """""".join(A__[0] )[:-1]
        split_layer = [tuple(("""metadata""" + A__[1]).split("""/""" ) )]
    elif "kvstore" in layer:
        A__ = layer.split("""kvstore""" )
        curr_real_layer_name = """""".join(A__[0] )[:-1]
        split_layer = [tuple(("""kvstore""" + A__[1]).split("""/""" ) )]
    else:
        A__ = layer.split("""/""" )
        curr_real_layer_name = """/""".join(A__[:-1] )
        split_layer = (A__[-1],)
    if "kvstore/path" in layer:
        content = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
    elif "kvstore/driver" in layer:
        content = """file"""
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def UpperCamelCase (current_block , save_path ):
    """Rename a block of weights and save it as a PyTorch state dict.

    Applies the sibling `rename_keys` mapping, converts '/'-separated
    checkpoint names into '.'-separated state-dict keys, and writes the result
    to ``save_path`` with ``torch.save``.

    Fix: both parameters were declared under the same name (a SyntaxError);
    names are restored from the body's reads.
    """
    current_block = rename_keys(current_block )
    new_current_block = {}
    for k, v in current_block.items():
        # State-dict keys use '.' separators instead of the checkpoint's '/'.
        new_current_block[k.replace("""/""" , """.""" )] = v
    current_block = new_current_block
    torch.save(current_block , save_path )
def UpperCamelCase (switch_checkpoint_path , dump_path , max_shard_size , dtype , weights_name: str = WEIGHTS_NAME ):
    """Shard a T5X Switch-Transformers checkpoint into PyTorch weight files.

    Streams tensors out of a tensorstore checkpoint one at a time, renames them
    to PyTorch conventions, and writes shards no larger than
    ``max_shard_size``; finally builds the sharded-weights index file.

    Fixes: the original declared four of its five parameters under one name
    (a SyntaxError — duplicate argument) and referenced the undefined
    ``lowercase_`` throughout; names are reconstructed from the surviving reads
    (``switch_checkpoint_path``, ``checkpoint_info``, etc.).

    Returns:
        ({weights_name: keys}, None) when a single shard suffices, else
        (metadata, index) matching the HF sharded-checkpoint index format.
    """
    max_shard_size = convert_file_size_to_int(max_shard_size )
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
        checkpoint_info = flatten_dict(checkpoint_info , sep="""/""" )
    # Group flattened keys by their real layer name.
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer , checkpoint_info , switch_checkpoint_path )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
        raw_weights = torch.tensor(raw_weights )
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("""/""" ) ) , raw_weights )
        key = """/""".join(key )
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path , weights_name.replace(""".bin""" , f"""-{len(sharded_state_dicts)+1:05d}-of-???.bin""" ) )
            rename_and_save_block(current_block , save_path )
            sharded_state_dicts.append(current_block.keys() )
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch , dtype ) )
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace(""".bin""" , f"""-{len(sharded_state_dicts)+1:05d}-of-???.bin""" ) )
    rename_and_save_block(current_block , save_path )
    sharded_state_dicts.append(current_block.keys() )
    # If we only have one shard, we return it
    if len(sharded_state_dicts ) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace(
            """.bin""" , f"""-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin""" )
        # Rename the provisional "-of-???" files now that the count is known.
        temp_filename = os.path.join(dump_path , weights_name.replace(""".bin""" , f"""-{idx+1:05d}-of-???.bin""" ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"""total_size""": total_size}
    index = {"""metadata""": metadata, """weight_map""": weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , """w""" , encoding="""utf-8""" ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + """\n"""
        f.write(content )
    return metadata, index
if __name__ == "__main__":
    # Fix: the parser and parsed args were bound to the scrambled name `A_`
    # while the code below reads `parser`/`args` (NameError).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--switch_t5x_checkpoint_path',
        default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
        type=str,
        required=False,
        help='Path to a directory containing a folder per layer. Follows the original Google format.',
    )
    parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
    parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
        type=str,
        required=False,
        help='Path to the output pytorch model.',
    )
    args = parser.parse_args()
    # Fix: `shard_on_the_fly` is defined above under the scrambled name
    # `UpperCamelCase`; also the argparse dest is `switch_t5x_checkpoint_path`
    # (the original read the mistyped `switch_tax_checkpoint_path`).
    UpperCamelCase(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def UpperCamelCase () -> None:
    """Smoke-test a converted Switch-Transformers checkpoint.

    Loads the converted model, tokenizes a masked prompt, generates, and prints
    the decoded output. Fix: the prompt and input ids were assigned to
    scrambled locals but passed on as the undefined name ``lowercase_``
    (NameError); they are now plain locals.
    """
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
    config = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
    config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        """/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
    tokenizer = TaTokenizer.from_pretrained("""t5-small""" )
    text = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
    input_ids = tokenizer(text , return_tensors="""pt""" ).input_ids
    out = model.generate(input_ids , decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
| 64 | 0 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
A_ : Any = logging.getLogger(__name__)
def UpperCamelCase (a=2 , b=3 , batch_size=16 , n_train_batches: int = 10 , n_valid_batches: int = 2 ):
    """Build (train, valid) DataLoaders over noisy samples of ``y = a*x + b``.

    Fix: the original declared all five parameters under the name
    ``lowercase_`` (a SyntaxError — duplicate argument); names are restored
    from the body's reads (``batch_size``, the linear coefficients, and the two
    dataset sizes).
    """
    def get_dataset(n_batches ):
        # x ~ N(0, 1); targets follow the line a*x + b plus small noise.
        x = torch.randn(batch_size * n_batches , 1 )
        return TensorDataset(x , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
    train_dataset = get_dataset(n_train_batches )
    valid_dataset = get_dataset(n_valid_batches )
    # NOTE(review): the scrambled source hid the `shuffle` values; True for
    # train / False for valid follows the upstream accelerate test — confirm.
    train_dataloader = DataLoader(train_dataset , shuffle=True , batch_size=batch_size , num_workers=4 )
    valid_dataloader = DataLoader(valid_dataset , shuffle=False , batch_size=batch_size , num_workers=4 )
    return (train_dataloader, valid_dataloader)
def UpperCamelCase (num_epochs , model , dataloader , optimizer , accelerator , scheduler=None ):
    """Run a short training loop and return the per-step random draws.

    Fix: the original declared all six parameters under the name ``lowercase_``
    (a SyntaxError — duplicate argument) and never initialized the ``rands``
    accumulator under that name; both are restored from the body's reads.

    Returns the list of ``random.random()`` values appended once per batch,
    which the checkpointing tests compare across save/restore cycles.
    """
    rands = []
    for epoch in range(num_epochs ):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x )
            loss = torch.nn.functional.mse_loss(outputs , y )
            accelerator.backward(loss )
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random() ) # Introduce some randomness
        # NOTE(review): scheduler stepped once per epoch here; the scrambled
        # source hides the original indentation — confirm against upstream.
        if scheduler is not None:
            scheduler.step()
    return rands
class _a (nn.Module ):
    """Minimal model computing the affine map ``x * a + b`` with scalar
    learnable parameters, used by the checkpointing tests."""
    def __init__( self ):
        super().__init__()
        # Fix: the parameters were assigned to scrambled locals, so `self.a`
        # and `self.b` (read by the forward pass and by the tests) were never
        # registered on the module.
        self.a = nn.Parameter(torch.randn(1 ) )
        self.b = nn.Parameter(torch.randn(1 ) )
    def __A ( self , A__ ):
        # Fix: the original referenced the undefined name `x`; use the actual
        # input parameter.
        return A__ * self.a + self.b
class _a (unittest.TestCase ):
    '''
    Checkpoint save/load round-trip tests for `accelerate` (`save_state` /
    `load_state` with `ProjectConfiguration`).

    NOTE(review): this file was mechanically obfuscated -- distinct locals were all
    collapsed into the single name ``A__``, tuple unpackings were dropped, and
    several assertions (e.g. ``self.assertEqual(A__ , A__ )``) no longer compare
    the values the original test compared.  Code is kept byte-for-byte; only
    comments were added.  Test methods were also renamed to ``__A`` and are no
    longer discovered by unittest -- TODO confirm against the upstream file.
    '''

    def __A ( self ):
        # Saving twice with ``total_limit=1`` should leave exactly one checkpoint on disk.
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : Optional[Any] = DummyModel()
            A__ : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ : str = dummy_dataloaders()
            A__ : Dict = ProjectConfiguration(total_limit=1 , project_dir=A__ , automatic_checkpoint_naming=A__ )
            # Train baseline
            A__ : List[str] = Accelerator(project_config=A__ )
            A__ : Any = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )

    def __A ( self ):
        # Resume training from an explicitly named folder; model/optimizer state should round-trip.
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : str = DummyModel()
            A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ : int = dummy_dataloaders()
            # Train baseline
            A__ : str = Accelerator()
            A__ : List[str] = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            # Save initial
            A__ : List[Any] = os.path.join(A__ , """initial""" )
            accelerator.save_state(A__ )
            (A__) : str = model.a.item(), model.b.item()
            A__ : Dict = optimizer.state_dict()
            A__ : List[str] = train(3 , A__ , A__ , A__ , A__ )
            (A__) : str = model.a.item(), model.b.item()
            A__ : Any = optimizer.state_dict()
            # Train partially
            set_seed(42 )
            A__ : Optional[int] = DummyModel()
            A__ : Dict = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ : Dict = dummy_dataloaders()
            A__ : List[str] = Accelerator()
            A__ : Optional[Any] = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            accelerator.load_state(A__ )
            (A__) : Tuple = model.a.item(), model.b.item()
            A__ : Union[str, Any] = optimizer.state_dict()
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            A__ : List[str] = train(2 , A__ , A__ , A__ , A__ )
            # Save everything
            A__ : Optional[int] = os.path.join(A__ , """checkpoint""" )
            accelerator.save_state(A__ )
            # Load everything back in and make sure all states work
            accelerator.load_state(A__ )
            test_rands += train(1 , A__ , A__ , A__ , A__ )
            (A__) : Union[str, Any] = model.a.item(), model.b.item()
            A__ : Optional[int] = optimizer.state_dict()
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )

    def __A ( self ):
        # Same round-trip as above but via automatic checkpoint naming (checkpoints/checkpoint_N).
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : int = DummyModel()
            A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ : List[str] = dummy_dataloaders()
            A__ : str = ProjectConfiguration(automatic_checkpoint_naming=A__ )
            # Train baseline
            A__ : Any = Accelerator(project_dir=A__ , project_config=A__ )
            A__ : str = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            # Save initial
            accelerator.save_state()
            (A__) : Tuple = model.a.item(), model.b.item()
            A__ : int = optimizer.state_dict()
            A__ : int = train(3 , A__ , A__ , A__ , A__ )
            (A__) : Optional[Any] = model.a.item(), model.b.item()
            A__ : Any = optimizer.state_dict()
            # Train partially
            set_seed(42 )
            A__ : Dict = DummyModel()
            A__ : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ : Union[str, Any] = dummy_dataloaders()
            A__ : List[Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A__ )
            A__ : Dict = Accelerator(project_dir=A__ , project_config=A__ )
            A__ : Union[str, Any] = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
            (A__) : Optional[int] = model.a.item(), model.b.item()
            A__ : Tuple = optimizer.state_dict()
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            A__ : str = train(2 , A__ , A__ , A__ , A__ )
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_1""" ) )
            test_rands += train(1 , A__ , A__ , A__ , A__ )
            (A__) : Optional[int] = model.a.item(), model.b.item()
            A__ : List[Any] = optimizer.state_dict()
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )

    def __A ( self ):
        # register_for_checkpointing must reject objects without state_dict/load_state_dict
        # and report each offending index in the raised message.
        A__ : Union[str, Any] = torch.tensor([1, 2, 3] )
        A__ : int = torch.tensor([2, 3, 4] )
        A__ : List[Any] = DummyModel()
        A__ : List[Any] = torch.optim.Adam(net.parameters() )
        A__ : Tuple = Accelerator()
        with self.assertRaises(A__ ) as ve:
            accelerator.register_for_checkpointing(A__ , A__ , A__ , A__ )
        A__ : Any = str(ve.exception )
        self.assertTrue("""Item at index 0""" in message )
        self.assertTrue("""Item at index 1""" in message )
        self.assertFalse("""Item at index 2""" in message )
        self.assertFalse("""Item at index 3""" in message )

    def __A ( self ):
        # LR-scheduler state must also round-trip through save_state/load_state.
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : Any = DummyModel()
            A__ : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ : Dict = torch.optim.lr_scheduler.StepLR(A__ , step_size=1 , gamma=0.9_9 )
            A__ : List[Any] = dummy_dataloaders()
            A__ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=A__ )
            # Train baseline
            A__ : Optional[Any] = Accelerator(project_dir=A__ , project_config=A__ )
            A__ : Union[str, Any] = accelerator.prepare(
                A__ , A__ , A__ , A__ , A__ )
            # Save initial
            accelerator.save_state()
            A__ : Tuple = scheduler.state_dict()
            train(3 , A__ , A__ , A__ , A__ , A__ )
            self.assertNotEqual(A__ , scheduler.state_dict() )
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
            self.assertEqual(A__ , scheduler.state_dict() )

    def __A ( self ):
        # With total_limit=2, only the two most recent of 11 checkpoints may remain.
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : Optional[Any] = DummyModel()
            A__ : int = ProjectConfiguration(automatic_checkpoint_naming=A__ , total_limit=2 )
            # Train baseline
            A__ : List[str] = Accelerator(project_dir=A__ , project_config=A__ )
            A__ : Union[str, Any] = accelerator.prepare(A__ )
            # Save 3 states:
            for _ in range(11 ):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) ) )
            self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_9""" ) ) )
            self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_10""" ) ) )

    @require_cuda
    def __A ( self ):
        # Re-launch this very file under torchrun (one process per GPU); the
        # ``if __name__ == "__main__"`` block below is the distributed payload.
        A__ : Dict = ["""torchrun""", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
        execute_subprocess_async(A__ , env=os.environ.copy() )
if __name__ == "__main__":
    # Distributed entry point: the @require_cuda test above re-launches this file via
    # torchrun, and this block then exercises save_state/load_state across processes
    # with map_location='cpu' / 'on_device' / invalid.
    # NOTE(review): obfuscation rebound every assignment target to ``A_`` while the
    # following lines still read the original names (savedir, model, optimizer,
    # scheduler, param_device, ...); kept verbatim.
    A_ : List[str] = '/tmp/accelerate/state_checkpointing'
    A_ : Optional[Any] = DummyModel()
    A_ : Union[str, Any] = torch.optim.Adam(params=model.parameters(), lr=1E-3)
    A_ : str = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    A_ : List[Any] = dummy_dataloaders()
    A_ : int = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    A_ : List[str] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
    if accelerator.process_index == 0:
        # Only rank 0 resets the shared checkpoint directory.
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    A_ : List[Any] = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    A_ : Dict = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the intial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        A_ : str = group['params'][0].device
        break
    assert param_device.type == accelerator.device.type
    A_ : Optional[Any] = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()
    # Check CPU state
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
    for group in optimizer.param_groups:
        A_ : str = group['params'][0].device
        break
    assert (
        param_device.type == torch.device('cpu').type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
    for group in optimizer.param_groups:
        A_ : Tuple = group['params'][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
    # Check error
    with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
        accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
| 703 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy-import structure for the BARTpho tokenizer.  The obfuscated original bound
# this dict to ``A_`` while `_LazyModule` below still read the undefined name
# ``_import_structure`` (a NameError at import time); the real name is restored.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # sentencepiece is missing: expose no tokenizer symbols.
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    # Static type checkers import the real symbol directly.
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    # ``A_`` is kept as a (chained) binding for backward compatibility with the
    # obfuscated name this file used.
    A_ = sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64 | 0 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class _a :
    '''
    Config/inputs factory for BertGeneration tests: builds a small random
    `BertGenerationConfig` plus input tensors and runs encoder/decoder checks.

    NOTE(review): obfuscated source -- ``__init__`` repeats the parameter name
    ``A__`` (a SyntaxError in real Python) and tuple unpackings were collapsed
    into a single name; kept byte-for-byte with comments only.
    '''

    def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=True , A__=99 , A__=32 , A__=5 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=50 , A__=0.0_2 , A__=True , A__=None , ):
        A__ : Optional[Any] = parent
        A__ : Optional[Any] = batch_size
        A__ : Any = seq_length
        A__ : Any = is_training
        A__ : List[Any] = use_input_mask
        A__ : int = vocab_size
        A__ : int = hidden_size
        A__ : Union[str, Any] = num_hidden_layers
        A__ : Dict = num_attention_heads
        A__ : Optional[int] = intermediate_size
        A__ : Any = hidden_act
        A__ : Any = hidden_dropout_prob
        A__ : Optional[Any] = attention_probs_dropout_prob
        A__ : int = max_position_embeddings
        A__ : Optional[int] = initializer_range
        A__ : Optional[int] = use_labels
        A__ : List[Any] = scope

    def __A ( self ):
        # Random ids + optional attention mask + labels, plus a freshly built config.
        A__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        A__ : Union[str, Any] = None
        if self.use_input_mask:
            A__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
        if self.use_labels:
            A__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        A__ : Any = self.get_config()
        return config, input_ids, input_mask, token_labels

    def __A ( self ):
        # Build the model config entirely from this tester's fields.
        return BertGenerationConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=A__ , initializer_range=self.initializer_range , )

    def __A ( self ):
        # Decoder variant: adds encoder hidden states + encoder attention mask.
        (
            A__
        ) : List[str] = self.prepare_config_and_inputs()
        A__ : List[str] = True
        A__ : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        A__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def __A ( self , A__ , A__ , A__ , A__ , **A__ , ):
        # Forward the encoder with and without an attention mask; check output shape.
        A__ : List[str] = BertGenerationEncoder(config=A__ )
        model.to(A__ )
        model.eval()
        A__ : Optional[int] = model(A__ , attention_mask=A__ )
        A__ : List[str] = model(A__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def __A ( self , A__ , A__ , A__ , A__ , A__ , A__ , **A__ , ):
        # Encoder used as a cross-attending decoder, with and without encoder mask.
        A__ : List[str] = True
        A__ : List[Any] = BertGenerationEncoder(config=A__ )
        model.to(A__ )
        model.eval()
        A__ : List[str] = model(
            A__ , attention_mask=A__ , encoder_hidden_states=A__ , encoder_attention_mask=A__ , )
        A__ : Any = model(
            A__ , attention_mask=A__ , encoder_hidden_states=A__ , )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def __A ( self , A__ , A__ , A__ , A__ , A__ , A__ , **A__ , ):
        # Cached (past_key_values) decoding must match uncached decoding on a random slice.
        A__ : str = True
        A__ : Dict = True
        A__ : str = BertGenerationDecoder(config=A__ ).to(A__ ).eval()
        # first forward pass
        A__ : Any = model(
            A__ , attention_mask=A__ , encoder_hidden_states=A__ , encoder_attention_mask=A__ , use_cache=A__ , )
        A__ : List[Any] = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        A__ : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
        A__ : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        A__ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
        A__ : Dict = torch.cat([input_mask, next_mask] , dim=-1 )
        A__ : Optional[int] = model(
            A__ , attention_mask=A__ , encoder_hidden_states=A__ , encoder_attention_mask=A__ , output_hidden_states=A__ , )["""hidden_states"""][0]
        A__ : Dict = model(
            A__ , attention_mask=A__ , encoder_hidden_states=A__ , encoder_attention_mask=A__ , past_key_values=A__ , output_hidden_states=A__ , )["""hidden_states"""][0]
        # select random slice
        A__ : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        A__ : int = output_from_no_past[:, -3:, random_slice_idx].detach()
        A__ : Dict = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(A__ , A__ , atol=1e-3 ) )

    def __A ( self , A__ , A__ , A__ , A__ , *A__ , ):
        # Causal-LM head: logits should span the full vocabulary at every position.
        A__ : Dict = BertGenerationDecoder(A__ )
        model.to(A__ )
        model.eval()
        A__ : Tuple = model(A__ , attention_mask=A__ , labels=A__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def __A ( self ):
        # Split prepared inputs into (config, kwargs-dict) as the common mixins expect.
        A__ : str = self.prepare_config_and_inputs()
        A__ : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class _a (__magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
    '''
    unittest wrapper wiring the tester above into the shared model/generation/
    pipeline mixins.

    NOTE(review): the mixin base classes were obfuscated to the undefined name
    ``__magic_name__`` and the tester is instantiated via the name
    ``BertGenerationEncoderTester`` even though the class above was renamed
    ``_a``; kept verbatim.
    '''
    # Model classes under test (all / decoder-only / pipeline mapping).
    UpperCAmelCase__: Union[str, Any] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    UpperCAmelCase__: Union[str, Any] = (BertGenerationDecoder,) if is_torch_available() else ()
    UpperCAmelCase__: Optional[Any] = (
        {'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def __A ( self ):
        # setUp: build the tester and the generic ConfigTester.
        A__ : Union[str, Any] = BertGenerationEncoderTester(self )
        A__ : str = ConfigTester(self , config_class=A__ , hidden_size=37 )

    def __A ( self ):
        self.config_tester.run_common_tests()

    def __A ( self ):
        A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A__ )

    def __A ( self ):
        # Same check but with model_type overridden to "bert".
        A__ : Dict = self.model_tester.prepare_config_and_inputs()
        A__ : int = """bert"""
        self.model_tester.create_and_check_model(A__ , A__ , A__ , A__ )

    def __A ( self ):
        A__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*A__ )

    def __A ( self ):
        A__ : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*A__ )

    def __A ( self ):
        # This regression test was failing with PyTorch < 1.3
        (
            A__
        ) : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
        A__ : List[str] = None
        self.model_tester.create_and_check_model_as_decoder(
            A__ , A__ , A__ , A__ , A__ , A__ , )

    def __A ( self ):
        A__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*A__ )

    @slow
    def __A ( self ):
        # Smoke-test loading the published checkpoint from the Hub.
        A__ : int = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
        self.assertIsNotNone(A__ )
@require_torch
class _a (unittest.TestCase ):
    '''
    Integration test: the pretrained BertGeneration *encoder* must reproduce a
    known output shape and a hard-coded slice of hidden-state values.
    '''
    @slow
    def __A ( self ):
        A__ : Tuple = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
        A__ : Optional[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
        with torch.no_grad():
            A__ : Union[str, Any] = model(A__ )[0]
        # Hidden states: (batch=1, seq=8, hidden=1024).
        A__ : str = torch.Size([1, 8, 1024] )
        self.assertEqual(output.shape , A__ )
        A__ : Tuple = torch.tensor(
            [[[0.1_7_7_5, 0.0_0_8_3, -0.0_3_2_1], [1.6_0_0_2, 0.1_2_8_7, 0.3_9_1_2], [2.1_4_7_3, 0.5_7_9_1, 0.6_0_6_6]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , A__ , atol=1e-4 ) )
@require_torch
class _a (unittest.TestCase ):
    '''
    Integration test: the pretrained BertGeneration *decoder* must reproduce a
    known logits shape (vocab 50358) and a hard-coded slice of logit values.
    '''
    @slow
    def __A ( self ):
        A__ : Optional[int] = BertGenerationDecoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
        A__ : Union[str, Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
        with torch.no_grad():
            A__ : str = model(A__ )[0]
        # Logits: (batch=1, seq=8, vocab=50358).
        A__ : str = torch.Size([1, 8, 5_0358] )
        self.assertEqual(output.shape , A__ )
        A__ : Optional[Any] = torch.tensor(
            [[[-0.5_7_8_8, -2.5_9_9_4, -3.7_0_5_4], [0.0_4_3_8, 4.7_9_9_7, 1.8_7_9_5], [1.5_8_6_2, 6.6_4_0_9, 4.4_6_3_8]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , A__ , atol=1e-4 ) )
| 704 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
# Mapping from OpenAI Whisper checkpoint names to their download URLs.  The
# second-to-last URL path component is the expected SHA-256 of the file (used by
# `_download` below for integrity checking).
#
# The obfuscated original bound this dict to ``A_`` while later code read the
# undefined name ``_MODELS``; it also carried an un-imported ``Dict`` annotation
# that would raise NameError at module import.  Both fixed here.
_MODELS = {
    'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
    'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
    'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
    'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
    'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
    'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
    'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
    'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
    'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
    'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}

# Backward-compatible alias for the obfuscated name.
A_ = _MODELS
def remove_ignore_keys_(state_dict):
    """Drop the top-level ``layers``/``blocks`` entries from ``state_dict`` in place.

    The obfuscated original referenced the undefined name ``state_dict`` and called
    ``pop`` with the wrong arguments; restored here.  ``pop(..., None)`` keeps the
    call a no-op when a key is absent.
    """
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)


# Backward-compatible alias for the obfuscated name this definition used to carry.
UpperCamelCase = remove_ignore_keys_
# Substring renames applied to OpenAI Whisper state-dict keys to turn them into
# Hugging Face `WhisperForConditionalGeneration` names (used by `rename_keys`).
# The obfuscated original bound this dict to ``A_`` while `rename_keys` read the
# undefined name ``WHISPER_MAPPING``; fixed here.
WHISPER_MAPPING = {
    'blocks': 'layers',
    'mlp.0': 'fc1',
    'mlp.2': 'fc2',
    'mlp_ln': 'final_layer_norm',
    '.attn.query': '.self_attn.q_proj',
    '.attn.key': '.self_attn.k_proj',
    '.attn.value': '.self_attn.v_proj',
    '.attn_ln': '.self_attn_layer_norm',
    '.attn.out': '.self_attn.out_proj',
    '.cross_attn.query': '.encoder_attn.q_proj',
    '.cross_attn.key': '.encoder_attn.k_proj',
    '.cross_attn.value': '.encoder_attn.v_proj',
    '.cross_attn_ln': '.encoder_attn_layer_norm',
    '.cross_attn.out': '.encoder_attn.out_proj',
    'decoder.ln.': 'decoder.layer_norm.',
    'encoder.ln.': 'encoder.layer_norm.',
    'token_embedding': 'embed_tokens',
    'encoder.positional_embedding': 'encoder.embed_positions.weight',
    'decoder.positional_embedding': 'decoder.embed_positions.weight',
    'ln_post': 'layer_norm',
}

# Backward-compatible alias for the obfuscated name.
A_ = WHISPER_MAPPING
def rename_keys(s_dict, mapping=None):
    """Rename every key of ``s_dict`` in place by applying all substring rules.

    The obfuscated original lost the ``new_key`` binding, called ``replace`` with
    the wrong arguments and dropped the reassignment into ``s_dict``; restored
    here.  ``mapping`` is a backward-compatible generalization: it defaults to the
    module-level ``WHISPER_MAPPING`` table.

    Args:
        s_dict: state dict whose keys get rewritten (mutated in place).
        mapping: optional ``{old_substring: new_substring}`` override.

    Returns:
        The same (mutated) ``s_dict``, for call chaining.
    """
    if mapping is None:
        mapping = WHISPER_MAPPING
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        # Apply every matching substring rewrite, accumulating into new_key.
        for k, v in mapping.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"""{key} -> {new_key}""")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict


# Backward-compatible alias for the obfuscated name this definition used to carry.
UpperCamelCase = rename_keys
def make_linear_from_emb(emb):
    """Build a bias-free ``nn.Linear`` sharing its weights with embedding ``emb``.

    Used to create the LM projection head tied to the token embedding.  The
    obfuscated original annotated a tuple target (a SyntaxError) and lost the
    ``bias=False`` argument; restored here.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Share storage with the embedding so the weights stay tied.
    lin_layer.weight.data = emb.weight.data
    return lin_layer


# Backward-compatible alias for the obfuscated name this definition used to carry.
UpperCamelCase = make_linear_from_emb
def _download(url: str, root: str = None) -> bytes:
    """Download ``url`` into ``root`` and return the file contents as bytes.

    The expected SHA-256 is embedded as the second-to-last path component of the
    URL (see ``_MODELS``); an existing file with a matching digest is reused.
    The obfuscated original repeated the parameter name (a SyntaxError), called
    the nonexistent ``hashlib.shaaaa`` and leaked file handles; all fixed here.

    Args:
        url: checkpoint URL.
        root: cache directory; defaults to ``~/.cache/whisper`` so the one-argument
            call in ``convert_openai_whisper_to_tfms`` keeps working.

    Returns:
        Raw checkpoint bytes.

    Raises:
        RuntimeError: if the target path is a non-regular file, or the download's
            SHA-256 does not match the expected digest.
    """
    if root is None:
        root = os.path.join(os.path.expanduser("~"), ".cache", "whisper")
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"""{download_target} exists and is not a regular file""")
    if os.path.isfile(download_target):
        with open(download_target, "rb") as f:
            model_bytes = f.read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        warnings.warn(f"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""")
    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("""Content-Length""")), ncols=80, unit="""iB""", unit_scale=True, unit_divisor=1024) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    with open(download_target, "rb") as f:
        model_bytes = f.read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            """Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.""")
    return model_bytes


# Backward-compatible alias for the obfuscated name this definition used to carry.
UpperCamelCase = _download
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Convert an OpenAI Whisper checkpoint into a HF ``WhisperForConditionalGeneration``.

    The obfuscated original repeated parameter names (a SyntaxError) and lost most
    local bindings; names are restored from the surviving call sites.

    Args:
        checkpoint_path: either a model name from ``_MODELS`` (e.g. ``"tiny"``) to
            download, or a local ``.pt`` checkpoint path.
        pytorch_dump_folder_path: output directory for ``save_pretrained``.

    Raises:
        ValueError: if weights beyond the positional embeddings are missing after
            loading the converted state dict.
    """
    if ".pt" not in checkpoint_path:
        import io  # local import: only needed when decoding a downloaded checkpoint

        # _download returns raw bytes; deserialize them the same way a local file is.
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path])), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["""dims"""]
    state_dict = original_checkpoint["""model_state_dict"""]
    # Keep the token-embedding weights before renaming, for the untied-head branch.
    proj_out_weights = state_dict["""decoder.token_embedding.weight"""]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["""decoder.layers.0.fc1.weight"""].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["""n_vocab"""],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["""n_mels"""],
        d_model=dimensions["""n_audio_state"""],
        max_target_positions=dimensions["""n_text_ctx"""],
        encoder_layers=dimensions["""n_audio_layer"""],
        encoder_attention_heads=dimensions["""n_audio_head"""],
        decoder_layers=dimensions["""n_text_layer"""],
        # was dimensions["n_text_state"] in the obfuscated source -- that is the
        # hidden width, not the head count.
        decoder_attention_heads=dimensions["""n_text_head"""],
        max_source_positions=dimensions["""n_audio_ctx"""],
    )
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            """Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
            f""" but all the following weights are missing {missing}""")
    if tie_embeds:
        # Tie the LM head to the decoder token embedding.
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)


# Backward-compatible alias for the obfuscated name this definition used to carry.
UpperCamelCase = convert_openai_whisper_to_tfms
if __name__ == "__main__":
    # CLI entry point.  The obfuscated original bound the parser and parsed args to
    # ``A_`` while the following lines read the undefined names ``parser``/``args``
    # (NameError); restored here.  Also fixed the "Patht" typo in the help text.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Path to the downloaded checkpoints')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 64 | 0 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _a (__magic_name__ , __magic_name__ , __magic_name__ ):
    '''
    GPT-2-based text decoder with a learned "prefix": incoming feature vectors are
    projected through ``encode_prefix``/``decode_prefix`` linear layers, prepended
    to the token embeddings and fed into a ``GPTaLMHeadModel``; generation is done
    with the beam search implemented in the last method.

    NOTE(review): obfuscated source -- the base classes were renamed to the
    undefined ``__magic_name__``, ``__init__`` repeats the parameter name ``A__``
    (a SyntaxError in real Python), tuple unpackings were collapsed, and
    ``torch.intaa`` is a mangling of an integer dtype (presumably int64 -- TODO
    confirm).  Code kept byte-for-byte; only comments were added.
    '''
    # Regex patterns of GPT-2 buffer names to ignore when loading checkpoints.
    UpperCAmelCase__: str = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']

    @register_to_config
    def __init__( self , A__ , A__ , A__ = None , A__ = 5_0257 , A__ = 1024 , A__ = 768 , A__ = 12 , A__ = 12 , A__ = None , A__ = "gelu_new" , A__ = 0.1 , A__ = 0.1 , A__ = 0.1 , A__ = 1e-5 , A__ = 0.0_2 , A__ = True , A__ = True , A__ = False , A__ = False , ):
        super().__init__()
        A__ : Union[str, Any] = prefix_length
        # A hidden projection is mandatory whenever the prefix width differs from n_embd.
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                F"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
                F""" `n_embd`: {n_embd} are not equal.""" )
        A__ : str = prefix_inner_dim
        A__ : Optional[Any] = prefix_hidden_dim
        # Encoder/decoder projections around the prefix (identity when no hidden dim).
        A__ : Tuple = (
            nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        A__ : int = (
            nn.Linear(self.prefix_hidden_dim , A__ ) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        A__ : Tuple = GPTaConfig(
            vocab_size=A__ , n_positions=A__ , n_embd=A__ , n_layer=A__ , n_head=A__ , n_inner=A__ , activation_function=A__ , resid_pdrop=A__ , embd_pdrop=A__ , attn_pdrop=A__ , layer_norm_epsilon=A__ , initializer_range=A__ , scale_attn_weights=A__ , use_cache=A__ , scale_attn_by_inverse_layer_idx=A__ , reorder_and_upcast_attn=A__ , )
        A__ : int = GPTaLMHeadModel(A__ )

    def __A ( self , A__ , A__ , A__ = None , A__ = None , ):
        # Forward: embed tokens, round-trip the prefix through encode/decode,
        # concatenate [prefix | text] embeddings, and run GPT-2 on them.
        A__ : List[str] = self.transformer.transformer.wte(A__ )
        A__ : int = self.encode_prefix(A__ )
        A__ : int = self.decode_prefix(A__ )
        A__ : Optional[Any] = torch.cat((prefix_embeds, embedding_text) , dim=1 )
        if labels is not None:
            # Prepend dummy (zero) labels for the prefix positions.
            A__ : Any = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
            A__ : List[Any] = torch.cat((dummy_token, input_ids) , dim=1 )
        A__ : List[str] = self.transformer(inputs_embeds=A__ , labels=A__ , attention_mask=A__ )
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def __A ( self , A__ , A__ ):
        # Zero "labels" covering the prefix positions.
        return torch.zeros(A__ , self.prefix_length , dtype=torch.intaa , device=A__ )

    def __A ( self , A__ ):
        # Expose the prefix encoder directly.
        return self.encode_prefix(A__ )

    @torch.no_grad()
    def __A ( self , A__ , A__ , A__ ):
        # Generate captions: one beam search per feature row, then stack results.
        A__ : List[Any] = torch.split(A__ , 1 , dim=0 )
        A__ : Optional[int] = []
        A__ : str = []
        for feature in features:
            A__ : Dict = self.decode_prefix(feature.to(A__ ) )  # back to the clip feature
            # Only support beam search for now
            A__ : Union[str, Any] = self.generate_beam(
                input_embeds=A__ , device=A__ , eos_token_id=A__ )
            generated_tokens.append(output_tokens[0] )
            generated_seq_lengths.append(seq_lengths[0] )
        A__ : int = torch.stack(A__ )
        A__ : List[Any] = torch.stack(A__ )
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def __A ( self , A__=None , A__=None , A__=None , A__ = 5 , A__ = 67 , A__ = 1.0 , A__ = None , ):
        # Beam search over GPT-2 logits, maintaining per-beam scores, lengths and
        # a stopped mask; returns beams sorted by length-normalized score.
        A__ : Any = eos_token_id
        A__ : Any = None
        A__ : Optional[int] = None
        A__ : Optional[Any] = torch.ones(A__ , device=A__ , dtype=torch.int )
        A__ : Any = torch.zeros(A__ , device=A__ , dtype=torch.bool )
        if input_embeds is not None:
            A__ : Dict = input_embeds
        else:
            A__ : str = self.transformer.transformer.wte(A__ )
        for i in range(A__ ):
            A__ : Dict = self.transformer(inputs_embeds=A__ )
            A__ : str = outputs.logits
            A__ : Union[str, Any] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            A__ : Any = logits.softmax(-1 ).log()
            if scores is None:
                # First step: seed the beams from the top-k tokens.
                A__ : Optional[int] = logits.topk(A__ , -1 )
                A__ : List[Any] = generated.expand(A__ , *generated.shape[1:] )
                A__ : List[Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
                if tokens is None:
                    A__ : Optional[Any] = next_tokens
                else:
                    A__ : List[Any] = tokens.expand(A__ , *tokens.shape[1:] )
                    A__ : int = torch.cat((tokens, next_tokens) , dim=1 )
            else:
                # Subsequent steps: extend beams, length-normalize, re-select top-k.
                A__ : Optional[int] = -float(np.inf )
                A__ : List[Any] = 0
                A__ : str = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                A__ : Dict = scores_sum / seq_lengths[:, None]
                A__ : List[Any] = scores_sum_average.view(-1 ).topk(A__ , -1 )
                A__ : Tuple = next_tokens // scores_sum.shape[1]
                A__ : Optional[Any] = seq_lengths[next_tokens_source]
                A__ : List[str] = next_tokens % scores_sum.shape[1]
                A__ : Optional[int] = next_tokens.unsqueeze(1 )
                A__ : int = tokens[next_tokens_source]
                A__ : List[Any] = torch.cat((tokens, next_tokens) , dim=1 )
                A__ : str = generated[next_tokens_source]
                A__ : Optional[Any] = scores_sum_average * seq_lengths
                A__ : Union[str, Any] = is_stopped[next_tokens_source]
            # Append the embedding of the chosen token and update the stopped mask.
            A__ : str = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
            A__ : Optional[int] = torch.cat((generated, next_token_embed) , dim=1 )
            A__ : List[str] = is_stopped + next_tokens.eq(A__ ).squeeze()
            if is_stopped.all():
                break
        A__ : Dict = scores / seq_lengths
        A__ : Dict = scores.argsort(descending=A__ )
        # tokens tensors are already padded to max_seq_length
        A__ : Union[str, Any] = [tokens[i] for i in order]
        A__ : Any = torch.stack(A__ , dim=0 )
        A__ : Dict = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
        return output_texts, seq_lengths
| 705 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Make torch ops deterministic so the hard-coded image-slice comparisons below are reproducible.
enable_full_determinism()
@skip_mps
class _a (__magic_name__ , unittest.TestCase ):
    '''
    Fast (tiny-model) tests for ``TextToVideoSDPipeline``: builds minimal UNet3D,
    DDIM scheduler, VAE and CLIP components, runs a 2-step generation and compares
    a pixel slice against hard-coded values.

    NOTE(review): obfuscated source -- the pipeline-tester mixin base was renamed
    to the undefined ``__magic_name__`` and locals were collapsed into ``A__``;
    kept byte-for-byte with comments only.
    '''
    UpperCAmelCase__: Any = TextToVideoSDPipeline
    UpperCAmelCase__: Any = TEXT_TO_IMAGE_PARAMS
    UpperCAmelCase__: Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    UpperCAmelCase__: Optional[int] = frozenset(
        [
            '''num_inference_steps''',
            '''generator''',
            '''latents''',
            '''return_dict''',
            '''callback''',
            '''callback_steps''',
        ] )

    def __A ( self ):
        # Build the smallest viable component set for the pipeline under test.
        torch.manual_seed(0 )
        A__ : Optional[int] = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
        A__ : Optional[int] = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A__ , set_alpha_to_one=A__ , )
        torch.manual_seed(0 )
        A__ : Union[str, Any] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        A__ : Union[str, Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
        A__ : Union[str, Any] = CLIPTextModel(A__ )
        A__ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        A__ : Dict = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
        }
        return components

    def __A ( self , A__ , A__=0 ):
        # Seeded generator + standard prompt kwargs for a reproducible call.
        if str(A__ ).startswith("""mps""" ):
            A__ : Tuple = torch.manual_seed(A__ )
        else:
            A__ : List[str] = torch.Generator(device=A__ ).manual_seed(A__ )
        A__ : List[str] = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """pt""",
        }
        return inputs

    def __A ( self ):
        # End-to-end 2-step generation on CPU, checked against a hard-coded slice.
        A__ : List[str] = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        A__ : Union[str, Any] = self.get_dummy_components()
        A__ : Union[str, Any] = TextToVideoSDPipeline(**A__ )
        A__ : int = sd_pipe.to(A__ )
        sd_pipe.set_progress_bar_config(disable=A__ )
        A__ : int = self.get_dummy_inputs(A__ )
        A__ : int = """np"""
        A__ : Any = sd_pipe(**A__ ).frames
        A__ : Dict = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        A__ : Optional[Any] = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def __A ( self ):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A__ , expected_max_diff=3e-3 )

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def __A ( self ):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A__ , expected_max_diff=1e-2 )

    @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
    def __A ( self ):
        pass

    @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
    def __A ( self ):
        pass

    @unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
    def __A ( self ):
        pass

    def __A ( self ):
        # Delegate to the mixin's progress-bar test.
        return super().test_progress_bar()
@slow
@skip_mps
class _a (unittest.TestCase ):
    '''Slow CUDA integration tests comparing full pipeline output against stored reference videos.

    NOTE(review): results are assigned to ``A__`` but later lines read ``pipe``/
    ``video_frames``/``expected_video`` — mechanical renaming damage; confirm
    against the upstream diffusers test file.
    '''

    def __A ( self ):
        # 25-step generation with a DPM-Solver scheduler vs. a reference video.
        A__ : Union[str, Any] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
        A__ : Tuple = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
        A__ : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        A__ : int = pipe.to("""cuda""" )
        A__ : Optional[Any] = """Spiderman is surfing"""
        A__ : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
        A__ : Optional[Any] = pipe(A__ , generator=A__ , num_inference_steps=25 , output_type="""pt""" ).frames
        A__ : Dict = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2

    def __A ( self ):
        # Cheaper 2-step variant of the test above, with its own reference video.
        A__ : List[Any] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
        A__ : Optional[int] = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
        A__ : List[str] = pipe.to("""cuda""" )
        A__ : Dict = """Spiderman is surfing"""
        A__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
        A__ : Optional[int] = pipe(A__ , generator=A__ , num_inference_steps=2 , output_type="""pt""" ).frames
        A__ : Optional[int] = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2
| 64 | 0 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
# Module-level logger (bound to a throwaway name by the renaming pass).
A_ : Union[str, Any] = logging.get_logger(__name__)


class _a (__magic_name__ ):
    '''Deprecated alias kept for backward compatibility; use ``PoolFormerImageProcessor`` instead.'''

    def __init__( self , *A__ , **A__ ):
        # Emit a deprecation warning, then defer to the image-processor base class.
        # NOTE(review): the second positional argument to ``warnings.warn`` should
        # be a warning *category* (upstream passes ``FutureWarning``); here it is
        # a renamed argument — looks like mechanical renaming damage, confirm.
        warnings.warn(
            """The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use PoolFormerImageProcessor instead.""" , A__ , )
        super().__init__(*A__ , **A__ )
| 706 |
def UpperCamelCase (lowercase_: int ) -> int:
    """Return the 1-indexed position of the most significant set bit of ``lowercase_``.

    ``8`` (``0b1000``) yields ``4``; ``0`` (no bit set) yields ``0``.

    Raises:
        TypeError: if the input is not an ``int``.
        ValueError: if the input is negative — the shift loop below would never
            terminate, since ``>>=`` on a negative int stays negative forever.
    """
    # Bug fix: the original guard was ``isinstance(lowercase_, lowercase_)``, which
    # raises ``TypeError: isinstance() arg 2 must be a type`` on every call.
    if not isinstance(lowercase_ , int ):
        raise TypeError("""Input value must be an 'int' type""" )
    if lowercase_ < 0:
        raise ValueError("""Input value must be a non-negative integer""" )
    position = 0
    number = lowercase_
    while number:
        # Drop one bit per iteration; the iteration count is the MSB position.
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 64 | 0 |
from ..utils import DummyObject, requires_backends
class _a (metaclass=__magic_name__ ):
    '''Placeholder object that raises an informative error when ``onnx`` is not installed.

    NOTE(review): ``*A__, **A__`` repeats one argument name (a SyntaxError as
    written) and both classmethods share the mangled name ``__A`` — upstream these
    are distinct dummies (``from_config`` / ``from_pretrained``); confirm.
    '''

    # Backend(s) the real implementation requires; checked by ``requires_backends``.
    UpperCAmelCase__: Dict = ['''onnx''']

    def __init__( self , *A__ , **A__ ):
        # Fail fast with an "install onnx" error instead of an AttributeError later.
        requires_backends(self , ["""onnx"""] )

    @classmethod
    def __A ( cls , *A__ , **A__ ):
        requires_backends(cls , ["""onnx"""] )

    @classmethod
    def __A ( cls , *A__ , **A__ ):
        requires_backends(cls , ["""onnx"""] )
| 707 |
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def UpperCamelCase (img: np.ndarray , pt_src: np.ndarray , pt_dst: np.ndarray , rows: int , cols: int ) -> np.ndarray:
    """Warp ``img`` with the affine transform mapping ``pt_src`` onto ``pt_dst``.

    ``pt_src``/``pt_dst`` are 3x2 float arrays of corresponding points; the output
    image has size ``(rows, cols)``.

    Bug fix: the original signature repeated one parameter name five times (a
    SyntaxError) and the body referenced the wrong arguments; parameter names are
    restored from the body's own use of ``rows``/``cols`` and the demo call sites
    below — confirm against the upstream OpenCV rotation example.
    """
    # 2x3 affine matrix from three source points to three destination points.
    A__ : Any = cva.getAffineTransform(pt_src , pt_dst )
    return cva.warpAffine(img , A__ , (rows, cols) )
if __name__ == "__main__":
    # Demo: rotate the Lena test image three ways and plot the results.
    # NOTE(review): every assignment targets the throwaway names ``A_`` while later
    # lines read ``image``/``gray_img``/``ptsa``/``img_rows``/``img_cols``/
    # ``images``/``titles`` — mechanical renaming damage; confirm upstream.
    # read original image
    A_ : List[Any] = cva.imread(
        str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
    )
    # turn image in gray scale value
    A_ : List[Any] = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    A_ , A_ : Optional[Any] = gray_img.shape
    # set different points to rotate image
    A_ : str = np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
    A_ : Dict = np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
    A_ : Optional[int] = np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
    A_ : Optional[int] = np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
    # add all rotated images in a list
    A_ : Dict = [
        gray_img,
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
    ]
    # plot different image rotations
    A_ : Union[str, Any] = plt.figure(1)
    A_ : Union[str, Any] = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
        plt.title(titles[i])
        plt.axis('off')
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 64 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
    # Probe the heavy optional dependencies before exposing the real pipelines.
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fall back to placeholder objects that raise a helpful error on first use.
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    # Dependencies are present: export the real implementations.
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 708 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _a (unittest.TestCase ):
    '''Smoke tests for ``PyTorchBenchmark`` across model types, precisions and file outputs.

    Every test benchmarks a tiny checkpoint with batch size 1 / sequence length 8
    and asserts the result tables are populated.  NOTE(review): locals are assigned
    to ``A__`` but read back under names like ``results``/``benchmark``/``config``,
    and flag arguments are passed as ``A__`` — mechanical renaming damage
    throughout; confirm against the upstream benchmark tests.
    '''

    def __A ( self , A__ ):
        # Every (batch_size, sequence_length) cell of a result table must be filled.
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
                A__ : str = model_result["""result"""][batch_size][sequence_length]
                self.assertIsNotNone(A__ )

    def __A ( self ):
        # Inference time + memory on a tiny GPT-2 checkpoint.
        A__ : Dict = """sshleifer/tiny-gpt2"""
        A__ : Tuple = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : int = PyTorchBenchmark(A__ )
        A__ : List[Any] = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def __A ( self ):
        # Same, but with ``only_pretrain_model`` on a tiny classifier checkpoint.
        A__ : Dict = """sgugger/tiny-distilbert-classification"""
        A__ : Dict = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , only_pretrain_model=A__ , )
        A__ : str = PyTorchBenchmark(A__ )
        A__ : List[str] = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def __A ( self ):
        # Inference under TorchScript tracing.
        A__ : Any = """sshleifer/tiny-gpt2"""
        A__ : List[Any] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , torchscript=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : Tuple = PyTorchBenchmark(A__ )
        A__ : str = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    @unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
    def __A ( self ):
        # FP16 inference (GPU only).
        A__ : Optional[Any] = """sshleifer/tiny-gpt2"""
        A__ : Optional[Any] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , fpaa=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : str = PyTorchBenchmark(A__ )
        A__ : Any = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def __A ( self ):
        # A config with ``architectures=None`` must still benchmark cleanly.
        A__ : Optional[Any] = """sshleifer/tiny-gpt2"""
        A__ : Tuple = AutoConfig.from_pretrained(A__ )
        # set architectures equal to `None`
        A__ : List[Any] = None
        A__ : str = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : List[str] = PyTorchBenchmark(A__ , configs=[config] )
        A__ : Any = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def __A ( self ):
        # Training-mode benchmark (time + memory of a backward pass).
        A__ : Optional[int] = """sshleifer/tiny-gpt2"""
        A__ : Optional[int] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : Any = PyTorchBenchmark(A__ )
        A__ : Dict = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    @unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
    def __A ( self ):
        # FP16 training-mode benchmark (GPU only).
        A__ : Optional[int] = """sshleifer/tiny-gpt2"""
        A__ : List[str] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=A__ , multi_process=A__ , )
        A__ : Dict = PyTorchBenchmark(A__ )
        A__ : Optional[int] = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def __A ( self ):
        # Inference benchmark driven by an explicit config object.
        A__ : int = """sshleifer/tiny-gpt2"""
        A__ : Optional[int] = AutoConfig.from_pretrained(A__ )
        A__ : str = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : int = PyTorchBenchmark(A__ , configs=[config] )
        A__ : Tuple = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def __A ( self ):
        # Encoder-decoder (BART) inference with explicit config.
        A__ : List[str] = """sshleifer/tinier_bart"""
        A__ : List[str] = AutoConfig.from_pretrained(A__ )
        A__ : List[str] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : Union[str, Any] = PyTorchBenchmark(A__ , configs=[config] )
        A__ : str = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def __A ( self ):
        # GPT-2 training benchmark with explicit config.
        A__ : Optional[int] = """sshleifer/tiny-gpt2"""
        A__ : Union[str, Any] = AutoConfig.from_pretrained(A__ )
        A__ : Tuple = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : int = PyTorchBenchmark(A__ , configs=[config] )
        A__ : Tuple = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def __A ( self ):
        # BART training benchmark with explicit config.
        A__ : Dict = """sshleifer/tinier_bart"""
        A__ : int = AutoConfig.from_pretrained(A__ )
        A__ : Union[str, Any] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : List[Any] = PyTorchBenchmark(A__ , configs=[config] )
        A__ : Optional[Any] = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def __A ( self ):
        # CSV export: every report file must exist after the run.
        A__ : int = """sshleifer/tiny-gpt2"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            A__ : Dict = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=A__ , inference=A__ , save_to_csv=A__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(A__ , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(A__ , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(A__ , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(A__ , """train_time.csv""" ) , env_info_csv_file=os.path.join(A__ , """env.csv""" ) , multi_process=A__ , )
            A__ : Optional[Any] = PyTorchBenchmark(A__ )
            benchmark.run()
            self.assertTrue(Path(os.path.join(A__ , """inf_time.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(A__ , """train_time.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(A__ , """inf_mem.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(A__ , """train_mem.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(A__ , """env.csv""" ) ).exists() )

    def __A ( self ):
        # Line-by-line memory tracing: summaries must have all sections and the
        # log file must be written.
        A__ : Optional[int] = """sshleifer/tiny-gpt2"""

        def _check_summary_is_not_empty(A__ ):
            self.assertTrue(hasattr(A__ , """sequential""" ) )
            self.assertTrue(hasattr(A__ , """cumulative""" ) )
            self.assertTrue(hasattr(A__ , """current""" ) )
            self.assertTrue(hasattr(A__ , """total""" ) )

        with tempfile.TemporaryDirectory() as tmp_dir:
            A__ : Dict = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(A__ , """log.txt""" ) , log_print=A__ , trace_memory_line_by_line=A__ , multi_process=A__ , )
            A__ : Dict = PyTorchBenchmark(A__ )
            A__ : str = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            _check_summary_is_not_empty(result.train_summary )
            self.assertTrue(Path(os.path.join(A__ , """log.txt""" ) ).exists() )
| 64 | 0 |
from __future__ import annotations
from collections.abc import Callable
A_ : List[Any] = list[list[float | int]]
def UpperCamelCase (lowercase_: Matrix , lowercase_: Matrix ) -> Matrix:
A__ : int = len(lowercase_ )
A__ : Matrix = [[0 for _ in range(size + 1 )] for _ in range(lowercase_ )]
A__ : int
A__ : int
A__ : int
A__ : int
A__ : int
A__ : float
for row in range(lowercase_ ):
for col in range(lowercase_ ):
A__ : List[str] = matrix[row][col]
A__ : int = vector[row][0]
A__ : Optional[int] = 0
A__ : str = 0
while row < size and col < size:
# pivoting
A__ : int = max((abs(augmented[rowa][col] ), rowa) for rowa in range(lowercase_ , lowercase_ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
A__ : Union[str, Any] = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , lowercase_ ):
A__ : List[Any] = augmented[rowa][col] / augmented[row][col]
A__ : Dict = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , lowercase_ ):
for row in range(lowercase_ ):
A__ : List[str] = augmented[row][col] / augmented[col][col]
for cola in range(lowercase_ , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(lowercase_ )
]
def UpperCamelCase (lowercase_: list[int] ) -> Callable[[int], int]:
    """Return the lowest-degree polynomial through the points ``(1, y1) ... (n, yn)``.

    ``lowercase_`` holds the y-values for x = 1..len(lowercase_).  A Vandermonde
    system is built and handed to the module-level ``solve`` helper; the returned
    callable evaluates the fitted polynomial with integer-rounded coefficients.

    Bug fixes: the inner column loop iterated ``range(<the y-list>)`` (a
    TypeError) instead of ``range(size)``, and the matrix/vector stores had been
    flattened into dead assignments; restored from the surviving reads.

    NOTE(review): ``solve`` is not defined under that name in this chunk (every
    def was renamed); the call matches the original upstream module — confirm.
    """
    size = len(lowercase_ )
    # Vandermonde rows: [x**(n-1), ..., x, 1] for x = 1..size.
    x_val_matrix = [[0 for _ in range(size )] for _ in range(size )]
    y_val_vector = [[0] for _ in range(size )]
    for x_val, y_val in enumerate(lowercase_ ):
        for col in range(size ):
            x_val_matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        y_val_vector[x_val][0] = y_val
    coeffs = solve(x_val_matrix , y_val_vector )

    def interpolated_func(var: int ) -> int:
        # Evaluate the polynomial at ``var``; coefficients are rounded so exact
        # integer sequences reproduce exactly.
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(size ) )

    return interpolated_func
def UpperCamelCase (lowercase_: int ) -> int:
    """Evaluate the Project Euler 101 generating polynomial.

    u(n) = 1 - n + n^2 - n^3 + ... + n^10 (alternating signs).

    Bug fix: the body referenced an undefined name ``variable``; it now uses the
    actual parameter.
    """
    return (
        1
        - lowercase_
        + lowercase_**2
        - lowercase_**3
        + lowercase_**4
        - lowercase_**5
        + lowercase_**6
        - lowercase_**7
        + lowercase_**8
        - lowercase_**9
        + lowercase_**10
    )
def UpperCamelCase (func: Callable[[int], int] = question_function , order: int = 10 ) -> int:
    """Sum the first incorrect terms (FITs) of the bad optimum polynomials (Project Euler 101).

    For each prefix length k = 1..``order``, fit the optimum polynomial through the
    first k samples of ``func`` and add its value at the first argument where it
    diverges from ``func``.

    Bug fix: the original signature repeated one parameter name twice (a
    SyntaxError); names restored from the body's own references.

    NOTE(review): ``question_function`` and ``interpolate`` are not defined under
    those names in this chunk (every def was renamed); the references match the
    original upstream module — confirm.
    """
    data_points: list[int] = [func(x_val ) for x_val in range(1 , order + 1 )]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
    ]
    ret: int = 0
    for poly in polynomials:
        # Walk forward until the fitted polynomial first disagrees with func.
        x_val = 1
        while func(x_val ) == poly(x_val ):
            x_val += 1
        ret += poly(x_val )
    return ret
if __name__ == "__main__":
    # Print the Project Euler 101 answer.  NOTE(review): ``solution`` is not
    # defined under that name in this chunk (every def was renamed to
    # ``UpperCamelCase``) — confirm against the original module.
    print(f'''{solution() = }''')
| 709 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
A_ : Optional[int] = abspath(join(dirname(__file__), 'src'))
# NOTE(review): the computed path is bound to ``A_`` but inserted below as
# ``git_repo_path`` — mechanical renaming damage; confirm upstream.
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def UpperCamelCase (lowercase_: List[str] ) -> Any:
    """pytest ``pytest_configure`` hook: register the project's custom test markers.

    ``lowercase_`` is the pytest ``Config`` object.

    Bug fix: the body referenced an undefined name ``config`` instead of the
    parameter; every call would have raised NameError.
    """
    lowercase_.addinivalue_line(
        """markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" )
    lowercase_.addinivalue_line(
        """markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" )
    lowercase_.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" )
    lowercase_.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" )
    lowercase_.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" )
    lowercase_.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" )
def UpperCamelCase (lowercase_: Optional[int] ) -> Optional[Any]:
    """pytest ``pytest_addoption`` hook: delegate the shared CLI options (e.g. --make-reports)."""
    # Imported lazily so collecting this conftest does not hard-require transformers.
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(lowercase_ )
def UpperCamelCase (lowercase_: List[str] ) -> Optional[Any]:
    """pytest ``pytest_terminal_summary`` hook: emit extra reports when requested.

    ``lowercase_`` is the pytest terminal reporter; when ``--make-reports`` was
    passed, the shared report writer is invoked with that report id.

    Bug fixes: the body read an undefined name ``terminalreporter`` instead of
    the parameter, and passed the reporter (not the report id) as ``id=``.
    """
    make_reports = lowercase_.config.getoption("""--make-reports""" )
    if make_reports:
        # Imported lazily — only needed when reports are actually requested.
        from transformers.testing_utils import pytest_terminal_summary_main

        pytest_terminal_summary_main(lowercase_ , id=make_reports )
def UpperCamelCase (session , exitstatus: int ) -> None:
    """pytest ``pytest_sessionfinish`` hook.

    If no tests are collected, pytest exits with code 5, which makes the CI
    fail — rewrite that status to 0 on the session object.

    Bug fixes: the original signature repeated one parameter name (a SyntaxError)
    and the store to ``session.exitstatus`` had been flattened into a dead local
    assignment; restored per the upstream conftest.
    """
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
A_ : Tuple = doctest.register_optionflag('IGNORE_RESULT')
# Keep a handle on the stock checker so the subclass below can delegate to it.
A_ : Dict = doctest.OutputChecker
class _a (__magic_name__ ):
    '''Doctest output checker that honours a custom IGNORE_RESULT option flag.'''

    def __A ( self , want , got , optionflags ):
        """Accept any output when IGNORE_RESULT is set; otherwise defer to the base checker.

        Bug fix: the original signature repeated ``A__`` three times (a
        SyntaxError); parameter names restored from
        ``doctest.OutputChecker.check_output``.
        """
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )
# Install the customised checker and the HF doctest integration points.
# NOTE(review): the right-hand names (``CustomOutputChecker`` in particular) are
# not defined under those names in this chunk, and the targets are throwaway
# names rather than the upstream ``doctest.OutputChecker`` monkeypatch —
# mechanical renaming damage; confirm against the upstream conftest.
A_ : str = CustomOutputChecker
A_ : Dict = HfDoctestModule
A_ : Optional[int] = HfDocTestParser
| 64 | 0 |
from __future__ import annotations
class _a :
    """A 2-D matrix of ints/floats supporting determinants, inverses and arithmetic.

    Bug fixes: the obfuscated original repeated parameter names (SyntaxErrors),
    named every method ``__A`` (so later defs shadowed earlier ones), and
    flattened attribute/subscript stores into dead assignments — while the bodies
    still read ``self.rows``, ``self.num_rows``, ``self.get_minor`` etc.  Method
    and parameter names are restored from those internal references; a module
    alias ``Matrix = _a`` is added at the end so the constructor calls inside the
    methods resolve again.
    """

    def __init__( self , rows ):
        # Validate that ``rows`` is a rectangular list of int/float rows.
        error = TypeError(
            """Matrices must be formed from a list of zero or more lists containing at """
            """least one and the same number of values, each of which must be of type """
            """int or float.""" )
        if len(rows ) != 0:
            cols = len(rows[0] )
            if cols == 0:
                raise error
            for row in rows:
                if len(row ) != cols:
                    raise error
                for value in row:
                    if not isinstance(value , (int, float) ):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns( self ):
        """Return the matrix's columns as a list of lists (the transpose's rows)."""
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]

    @property
    def num_rows( self ):
        return len(self.rows )

    @property
    def num_columns( self ):
        return len(self.rows[0] )

    @property
    def order( self ):
        """(rows, columns) shape tuple."""
        return (self.num_rows, self.num_columns)

    @property
    def is_square( self ):
        return self.order[0] == self.order[1]

    def identity( self ):
        """Identity matrix with this matrix's row count."""
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
            for row_num in range(self.num_rows )
        ]
        return Matrix(values )

    def determinant( self ):
        """Determinant via Laplace expansion; 0 for non-square matrices."""
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0] )
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0]) )
        else:
            # Expand along the first row using the cofactor matrix.
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns ) )

    def is_invertable( self ):
        return bool(self.determinant() )

    def get_minor( self , row , column ):
        """Determinant of the sub-matrix with ``row`` and ``column`` removed."""
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns )
                if other_column != column
            ]
            for other_row in range(self.num_rows )
            if other_row != row
        ]
        return Matrix(values ).determinant()

    def get_cofactor( self , row , column ):
        """Signed minor at (row, column)."""
        if (row + column) % 2 == 0:
            return self.get_minor(row , column )
        return -1 * self.get_minor(row , column )

    def minors( self ):
        """Matrix of minors."""
        return Matrix(
            [
                [self.get_minor(row , column ) for column in range(self.num_columns )]
                for row in range(self.num_rows )
            ] )

    def cofactors( self ):
        """Matrix of cofactors (minors with the checkerboard sign pattern)."""
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns )
                ]
                for row in range(self.minors().num_rows )
            ] )

    def adjugate( self ):
        """Transpose of the cofactor matrix."""
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns )]
            for row in range(self.num_rows )
        ]
        return Matrix(values )

    def inverse( self ):
        """Inverse via adjugate / determinant.

        Raises:
            TypeError: if the determinant is zero.
        """
        determinant = self.determinant()
        if not determinant:
            raise TypeError("""Only matrices with a non-zero determinant have an inverse""" )
        return self.adjugate() * (1 / determinant)

    def __repr__( self ):
        return str(self.rows )

    def __str__( self ):
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    """[""" + """. """.join([str(value ) for value in row] ) + """.]"""
                    for row in self.rows
                ] )
            + "]"
        )

    def add_row( self , row , position = None ):
        """Append ``row`` (or insert it at ``position``) after validation."""
        type_error = TypeError("""Row must be a list containing all ints and/or floats""" )
        if not isinstance(row , list ):
            raise type_error
        for value in row:
            if not isinstance(value , (int, float) ):
                raise type_error
        if len(row ) != self.num_columns:
            raise ValueError(
                """Row must be equal in length to the other rows in the matrix""" )
        if position is None:
            self.rows.append(row )
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column( self , column , position = None ):
        """Append ``column`` (or insert it at ``position``) after validation."""
        type_error = TypeError(
            """Column must be a list containing all ints and/or floats""" )
        if not isinstance(column , list ):
            raise type_error
        for value in column:
            if not isinstance(value , (int, float) ):
                raise type_error
        if len(column ) != self.num_rows:
            raise ValueError(
                """Column must be equal in length to the other columns in the matrix""" )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows )
            ]

    def __eq__( self , other ):
        if not isinstance(other , Matrix ):
            return NotImplemented
        return self.rows == other.rows

    def __ne__( self , other ):
        return not self == other

    def __neg__( self ):
        return self * -1

    def __add__( self , other ):
        if self.order != other.order:
            raise ValueError("""Addition requires matrices of the same order""" )
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
                for i in range(self.num_rows )
            ] )

    def __sub__( self , other ):
        if self.order != other.order:
            raise ValueError("""Subtraction requires matrices of the same order""" )
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
                for i in range(self.num_rows )
            ] )

    def __mul__( self , other ):
        # Scalar multiplication truncates to int (original upstream behaviour).
        if isinstance(other , (int, float) ):
            return Matrix(
                [[int(element * other ) for element in row] for row in self.rows] )
        elif isinstance(other , Matrix ):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    """The number of columns in the first matrix must """
                    """be equal to the number of rows in the second""" )
            return Matrix(
                [
                    [Matrix.dot_product(row , column ) for column in other.columns()]
                    for row in self.rows
                ] )
        else:
            raise TypeError(
                """A Matrix can only be multiplied by an int, float, or another matrix""" )

    def __pow__( self , other ):
        if not isinstance(other , int ):
            raise TypeError("""A Matrix can only be raised to the power of an int""" )
        if not self.is_square:
            raise ValueError("""Only square matrices can be raised to a power""" )
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                """Only invertable matrices can be raised to a negative power""" )
        # Repeated multiplication; ``result`` starts as self so self**1 is self.
        result = self
        for _ in range(other - 1 ):
            result *= self
        return result

    @classmethod
    def dot_product( cls , row , column ):
        """Dot product of two equal-length sequences."""
        return sum(row[i] * column[i] for i in range(len(row ) ) )


# Restore the name the methods above construct with; the class itself was
# renamed to ``_a`` by the obfuscation pass.
Matrix = _a
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 710 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _a :
    '''Helper that builds tiny Pegasus configs/inputs for the TF model tests below.

    NOTE(review): ``__init__`` repeats the parameter name ``A__`` (a SyntaxError as
    written) and the assignments read names like ``batch_size`` that are never
    bound; other locals are assigned to ``A__`` but read back as ``input_ids``/
    ``config``/``model`` etc. — mechanical renaming damage; confirm upstream.
    '''
    UpperCAmelCase__: List[Any] = PegasusConfig
    UpperCAmelCase__: Optional[int] = {}
    UpperCAmelCase__: List[str] = '''gelu'''

    def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=False , A__=99 , A__=32 , A__=2 , A__=4 , A__=37 , A__=0.1 , A__=0.1 , A__=40 , A__=2 , A__=1 , A__=0 , ):
        # Store the tiny-model hyper-parameters used by the tests.
        A__ : Dict = parent
        A__ : Dict = batch_size
        A__ : Any = seq_length
        A__ : Optional[Any] = is_training
        A__ : int = use_labels
        A__ : Any = vocab_size
        A__ : Union[str, Any] = hidden_size
        A__ : Tuple = num_hidden_layers
        A__ : Tuple = num_attention_heads
        A__ : List[Any] = intermediate_size
        A__ : Union[str, Any] = hidden_dropout_prob
        A__ : Optional[Any] = attention_probs_dropout_prob
        A__ : List[Any] = max_position_embeddings
        A__ : Any = eos_token_id
        A__ : List[Any] = pad_token_id
        A__ : List[Any] = bos_token_id

    def __A ( self ):
        # Build (config, inputs_dict): random token ids with a forced EOS column,
        # plus a tiny Pegasus config.
        A__ : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        A__ : Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        A__ : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
        A__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        A__ : Tuple = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        A__ : str = prepare_pegasus_inputs_dict(A__ , A__ , A__ )
        return config, inputs_dict

    def __A ( self , A__ , A__ ):
        # Check that cached decoding (past_key_values) reproduces the full
        # forward pass on a random output slice.
        A__ : int = TFPegasusModel(config=A__ ).get_decoder()
        A__ : List[Any] = inputs_dict["""input_ids"""]
        A__ : Any = input_ids[:1, :]
        A__ : Optional[Any] = inputs_dict["""attention_mask"""][:1, :]
        A__ : Optional[int] = inputs_dict["""head_mask"""]
        A__ : Any = 1
        # first forward pass
        A__ : Tuple = model(A__ , attention_mask=A__ , head_mask=A__ , use_cache=A__ )
        A__ , A__ : Dict = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        A__ : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
        A__ : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        A__ : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
        A__ : Tuple = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        A__ : Optional[Any] = model(A__ , attention_mask=A__ )[0]
        A__ : Any = model(A__ , attention_mask=A__ , past_key_values=A__ )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        A__ : int = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        A__ : Any = output_from_no_past[:, -3:, random_slice_idx]
        A__ : Tuple = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(A__ , A__ , rtol=1e-3 )
def UpperCamelCase (config: Union[str, Any] , input_ids: Dict , decoder_input_ids: List[Any] , attention_mask: Dict = None , decoder_attention_mask: int = None , head_mask: List[Any] = None , decoder_head_mask: List[Any] = None , cross_attn_head_mask: str = None , ) -> int:
    """Fill in any masks the caller did not supply and pack everything into the
    model-input dict expected by the TF Pegasus tests.

    Bug fix: the original signature repeated one parameter name eight times (a
    SyntaxError); parameter names are restored from the body's own references
    and the returned dict's keys.

    NOTE(review): ``tf.inta`` looks like digit-mangling of ``tf.int8`` (same pass
    that produced ``UNetaDConditionModel`` elsewhere) — kept as-is, confirm
    against the upstream transformers test before changing.
    """
    if attention_mask is None:
        # Attend to every non-pad token.
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        # Always attend to the first (decoder-start) token, then mask pads.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class _a (__magic_name__ , __magic_name__ , unittest.TestCase ):
    '''Common-suite TF tests for Pegasus (base model + conditional-generation head).

    NOTE(review): the setup method references ``TFPegasusModelTester``, which is
    not defined under that name in this chunk, and its locals are assigned to
    ``A__`` but read back as ``self.model_tester``/``self.config_tester`` —
    mechanical renaming damage; confirm upstream.
    '''
    UpperCAmelCase__: List[Any] = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    UpperCAmelCase__: Tuple = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    UpperCAmelCase__: Tuple = (
        {
            '''conversational''': TFPegasusForConditionalGeneration,
            '''feature-extraction''': TFPegasusModel,
            '''summarization''': TFPegasusForConditionalGeneration,
            '''text2text-generation''': TFPegasusForConditionalGeneration,
            '''translation''': TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    UpperCAmelCase__: int = True
    UpperCAmelCase__: Union[str, Any] = False
    UpperCAmelCase__: List[str] = False

    def __A ( self ):
        # Wire up the tester helper and the config checker for the shared suite.
        A__ : Optional[Any] = TFPegasusModelTester(self )
        A__ : Tuple = ConfigTester(self , config_class=A__ )

    def __A ( self ):
        # Run the generic config sanity checks.
        self.config_tester.run_common_tests()

    def __A ( self ):
        # Exercise the past-key-values consistency check from the tester.
        A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*A__ )
@require_sentencepiece
@require_tokenizers
@require_tf
class _a (unittest.TestCase ):
    """Slow integration test: batch-summarise two source articles with the
    pinned ``google/pegasus-xsum`` checkpoint and compare the generated
    summaries against pinned expected outputs.

    NOTE(review): the helper methods all share the name ``__A`` (later
    definitions shadow earlier ones), and several names read below
    (``generated_words``, ``model_inputs``, ``generated_ids``, ``model``,
    ``self.model_name`` / ``self.src_text`` / ``self.expected_text`` /
    ``self.translate_src_text`` / ``self._assert_generated_batch_equal_expected``)
    have no visible binding here -- the original attribute and method names
    need restoring before this suite can run.
    """
    # Source documents fed to the summariser.
    UpperCAmelCase__: Optional[int] = [
        ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
        ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
    ]
    # Expected (pinned) summaries for the two articles above.
    UpperCAmelCase__: Any = [
        '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
        ''' reduce the risk of wildfires.''',
        '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
    ]  # differs slightly from pytorch, likely due to numerical differences in linear layers
    UpperCAmelCase__: List[str] = '''google/pegasus-xsum'''

    @cached_property
    def __A ( self ):
        # Tokenizer for the pinned checkpoint.
        return AutoTokenizer.from_pretrained(self.model_name )

    @cached_property
    def __A ( self ):
        # TF seq2seq model for the pinned checkpoint.
        A__ : int = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    def __A ( self , **A__ ):
        # Generate summaries and compare against the pinned expected text.
        A__ : str = self.translate_src_text(**A__ )
        assert self.expected_text == generated_words

    def __A ( self , **A__ ):
        # Tokenise, beam-search generate (num_beams=2), and decode to text.
        A__ : List[str] = self.tokenizer(self.src_text , **A__ , padding=A__ , return_tensors="""tf""" )
        A__ : Optional[int] = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=A__ , )
        A__ : Dict = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=A__ )
        return generated_words

    @slow
    def __A ( self ):
        # Entry point executed by the test runner.
        self._assert_generated_batch_equal_expected()
| 64 | 0 |
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class _a (unittest.TestCase ):
    """Checks that ``get_activation`` returns the expected module class for
    each lookup key and that every activation shows the expected response
    profile: saturated to 0 for a large negative input, non-zero at -1,
    exactly 0 at 0, and pass-through at +20."""

    def _check_profile( self , act , saturating_input ):
        # Shared response-profile assertions (same checks, same order, as the
        # per-activation tests used inline).
        self.assertEqual(act(torch.tensor(saturating_input , dtype=torch.floataa ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )

    def __A ( self ):
        # "swish" resolves to SiLU.
        swish = get_activation("""swish""" )
        self.assertIsInstance(swish , nn.SiLU )
        self._check_profile(swish , -100 )

    def __A ( self ):
        # "silu" resolves to SiLU as well.
        silu = get_activation("""silu""" )
        self.assertIsInstance(silu , nn.SiLU )
        self._check_profile(silu , -100 )

    def __A ( self ):
        # Mish saturates more slowly, hence the deeper negative probe.
        mish = get_activation("""mish""" )
        self.assertIsInstance(mish , nn.Mish )
        self._check_profile(mish , -200 )

    def __A ( self ):
        gelu = get_activation("""gelu""" )
        self.assertIsInstance(gelu , nn.GELU )
        self._check_profile(gelu , -100 )
class _a :
    """Dynamic-programming edit (Levenshtein) distance solver.

    Fixes over the previous revision: results are stored on ``self`` instead
    of being discarded into throwaway locals, the two public entry points now
    carry the names the ``__main__`` driver calls
    (``min_dist_top_down`` / ``min_dist_bottom_up``) instead of three methods
    that all shadowed each other under the name ``__A``, and the top-down
    recursion compares characters of the *two* words rather than the first
    word against itself.
    """

    def __init__( self ):
        self.worda: str = ""
        self.wordb: str = ""
        # DP table; semantics differ per entry point (memo vs. full table).
        self.dp: list[list[int]] = []

    def __min_dist_top_down_dp( self , m: int , n: int ) -> int:
        # Memoised recursion over suffix end-indices; -1 marks "unsolved".
        if m == -1:
            return n + 1  # first word exhausted: insert the remaining n+1 chars
        elif n == -1:
            return m + 1  # second word exhausted: delete the remaining m+1 chars
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.worda[m] == self.wordb[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
            else:
                insert = self.__min_dist_top_down_dp(m , n - 1 )
                delete = self.__min_dist_top_down_dp(m - 1 , n )
                replace = self.__min_dist_top_down_dp(m - 1 , n - 1 )
                self.dp[m][n] = 1 + min(insert , delete , replace )
            return self.dp[m][n]

    def min_dist_top_down( self , worda: str , wordb: str ) -> int:
        """Return the edit distance between *worda* and *wordb* (top-down DP)."""
        self.worda = worda
        self.wordb = wordb
        self.dp = [[-1 for _ in range(len(wordb ) )] for _ in range(len(worda ) )]
        return self.__min_dist_top_down_dp(len(worda ) - 1 , len(wordb ) - 1 )

    def min_dist_bottom_up( self , worda: str , wordb: str ) -> int:
        """Return the edit distance between *worda* and *wordb* (bottom-up DP)."""
        self.worda = worda
        self.wordb = wordb
        m = len(worda )
        n = len(wordb )
        # dp[i][j] = distance between worda[:i] and wordb[:j].
        self.dp = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
        for i in range(m + 1 ):
            for j in range(n + 1 ):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif worda[i - 1] == wordb[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert , delete , replace )
        return self.dp[m][n]


# Public alias used by the interactive driver below.
EditDistance = _a
if __name__ == "__main__":
    # Interactive driver: read two strings and report the edit distance
    # computed by both DP strategies (they must agree). The previous revision
    # read names (``solver``, ``Sa``) that were never bound and passed the
    # first string twice.
    solver = EditDistance()

    print('****************** Testing Edit Distance DP Algorithm ******************')
    print()

    Sa : str = input('Enter the first string: ').strip()
    Sb : str = input('Enter the second string: ').strip()

    print()
    print(f'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sb)}''')
    print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sb)}''')
    print()
    print('*************** End of Testing Edit Distance DP Algorithm ***************')
| 64 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the BLIP-2 model family: config and processor
# modules are always importable; the torch-dependent modeling module is
# registered only when torch is available. The previous revision assigned the
# structure to a throwaway name while `_LazyModule` read the undefined
# `_import_structure`, and never installed the lazy proxy in `sys.modules`.
_import_structure = {
    'configuration_blip_2': [
        'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Blip2Config',
        'Blip2QFormerConfig',
        'Blip2VisionConfig',
    ],
    'processing_blip_2': ['Blip2Processor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: expose only the torch-free modules.
    pass
else:
    _import_structure['modeling_blip_2'] = [
        'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Blip2Model',
        'Blip2QFormerModel',
        'Blip2PreTrainedModel',
        'Blip2ForConditionalGeneration',
        'Blip2VisionModel',
    ]

if TYPE_CHECKING:
    # NOTE(review): the submodule/class names below ("blip_a"/"Blipa*") do not
    # match the "blip_2"/"Blip2*" keys declared above -- confirm against the
    # sibling configuration/processing/modeling files.
    from .configuration_blip_a import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlipaConfig,
        BlipaQFormerConfig,
        BlipaVisionConfig,
    )
    from .processing_blip_a import BlipaProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_a import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipaForConditionalGeneration,
            BlipaModel,
            BlipaPreTrainedModel,
            BlipaQFormerModel,
            BlipaVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 712 |
def UpperCamelCase (first: int , second: int ) -> int:
    """Add two non-negative integers using only bitwise operations.

    Classic carry/XOR loop: XOR produces the sum ignoring carries, AND<<1
    produces the carries, repeat until no carry remains. The previous
    revision declared both parameters as ``lowercase_`` (a SyntaxError),
    never updated ``second``, and read an unbound name ``c``.

    Args:
        first: first addend (non-negative).
        second: second addend (non-negative).

    Returns:
        The sum ``first + second``.
    """
    while second != 0:
        carry = first & second  # bit positions that generate a carry
        first ^= second  # bitwise sum without carries
        second = carry << 1  # propagate the carry one bit to the left
    return first
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Interactive driver. Bind the bitwise adder defined above to the name
    # the report string uses (the previous revision called an undefined
    # ``add`` and read unbound ``first``/``second``).
    add = UpperCamelCase
    first = int(input('Enter the first number: ').strip())
    second = int(input('Enter the second number: ').strip())
    print(f'''{add(first, second) = }''')
| 64 | 0 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
# Module-level logger. The feature extractor below reports through the name
# ``logger``, so bind it in addition to the original throwaway alias.
A_ : List[str] = logging.get_logger(__name__)
logger = A_
class _a (__magic_name__ ):
    """Feature extractor that turns raw mono audio into padded log-mel
    spectrogram "patches" plus an optional attention mask (TVLT-style audio
    front end).

    NOTE(review): every ``__init__``/``__call__`` parameter is named ``A__``
    -- duplicate parameter names are a SyntaxError in Python -- and the bodies
    read names (``spectrogram_length``, ``num_channels``, ``patch_size``,
    ``feature_size``, ``n_fft``, ``hop_length_to_sampling_rate``,
    ``sampling_rate``, ``padding_value``, ``raw_speech``,
    ``return_attention_mask``, ``log_spec``, ...) that are never bound; the
    original parameter names and ``self`` assignments need restoring.
    """
    # Keys emitted by __call__'s BatchFeature.
    UpperCAmelCase__: Any = ['''audio_values''', '''audio_mask''']

    def __init__( self , A__=2048 , A__=1 , A__=[16, 16] , A__=128 , A__=4_4100 , A__=86 , A__=2048 , A__=0.0 , **A__ , ):
        super().__init__(
            feature_size=A__ , sampling_rate=A__ , padding_value=A__ , **A__ , )
        A__ : int = spectrogram_length
        A__ : List[str] = num_channels
        A__ : Tuple = patch_size
        # Number of frequency patches per frame.
        A__ : Any = feature_size // self.patch_size[1]
        A__ : Tuple = n_fft
        A__ : Any = sampling_rate // hop_length_to_sampling_rate
        A__ : Dict = sampling_rate
        A__ : Dict = padding_value
        # Slaney-normalised mel filter bank, transposed for spectrogram().
        A__ : Dict = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=A__ , min_frequency=0.0 , max_frequency=2_2050.0 , sampling_rate=A__ , norm="""slaney""" , mel_scale="""slaney""" , ).T

    def __A ( self , A__ ):
        # Compute a dB-scaled log-mel spectrogram, drop the last frame,
        # shift by -20 dB and rescale into a bounded range.
        A__ : List[Any] = spectrogram(
            A__ , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
        A__ : List[Any] = log_spec[:, :-1]
        A__ : Dict = log_spec - 20.0
        A__ : List[str] = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
        return log_spec

    def __call__( self , A__ , A__ = None , A__ = True , A__ = None , A__ = False , A__ = False , **A__ , ):
        """Featurise raw speech into padded log-mel patches and, optionally,
        an attention mask over the valid audio patches."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    """This feature extractor is set to support sampling rate"""
                    F""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
                    F""" with {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )

        # Detect whether the input is already a batch (2-D array or nested lists).
        A__ : Optional[int] = isinstance(A__ , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
        A__ : Optional[int] = is_batched_numpy or (
            isinstance(A__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            A__ : int = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
        elif not is_batched and not isinstance(A__ , np.ndarray ):
            A__ : List[Any] = np.asarray(A__ , dtype=np.floataa )
        elif isinstance(A__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            A__ : int = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            A__ : Dict = [np.asarray([raw_speech] ).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        A__ : List[str] = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , A__ ):
            A__ : Dict = [np.asarray(A__ , dtype=np.floataa ) for feature in audio_features]

        # Create audio attention mask
        A__ : Optional[int] = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            # 1 for valid patches, 0 for padding, per example.
            A__ : str = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            A__ : Tuple = np.array(A__ ).astype(np.floataa )

        # convert into correct format for padding
        A__ : Tuple = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        A__ : str = np.ones([len(A__ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
        A__ : Tuple = padded_audio_features * self.padding_value
        for i in range(len(A__ ) ):
            A__ : List[Any] = audio_features[i]
            A__ : List[str] = feature

        # return as BatchFeature
        if return_attention_mask:
            A__ : Union[str, Any] = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
        else:
            A__ : Optional[int] = {"""audio_values""": padded_audio_features}

        A__ : List[Any] = BatchFeature(data=A__ , tensor_type=A__ )
        return encoded_inputs
| 713 |
from __future__ import annotations
from collections.abc import Callable
A_ : List[Any] = list[list[float | int]]
def UpperCamelCase (lowercase_: Matrix , lowercase_: Matrix ) -> Matrix:
    """Solve the linear system (matrix | vector) by Gaussian elimination with
    partial pivoting and back substitution; results rounded to 10 dp.

    NOTE(review): both parameters are named ``lowercase_`` (a SyntaxError) and
    the body reads ``matrix``/``vector``/``size``/``row``/``col``/
    ``augmented``/``pivot_row``/``ratio`` through values discarded into the
    throwaway ``A__`` -- the original distinct identifiers need restoring.
    """
    A__ : int = len(lowercase_ )
    # Augmented (size x size+1) matrix [matrix | vector].
    A__ : Matrix = [[0 for _ in range(size + 1 )] for _ in range(lowercase_ )]
    A__ : int
    A__ : int
    A__ : int
    A__ : int
    A__ : int
    A__ : float
    for row in range(lowercase_ ):
        for col in range(lowercase_ ):
            A__ : List[str] = matrix[row][col]

        A__ : int = vector[row][0]

    A__ : Optional[int] = 0
    A__ : str = 0
    while row < size and col < size:
        # pivoting: select the row with the largest magnitude in this column
        A__ : int = max((abs(augmented[rowa][col] ), rowa) for rowa in range(lowercase_ , lowercase_ ) )[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            # swap the pivot row into the current position
            A__ , A__ : Union[str, Any] = augmented[pivot_row], augmented[row]

        # eliminate the current column from all rows below the pivot
        for rowa in range(row + 1 , lowercase_ ):
            A__ : List[Any] = augmented[rowa][col] / augmented[row][col]
            A__ : Dict = 0
            for cola in range(col + 1 , size + 1 ):
                augmented[rowa][cola] -= augmented[row][cola] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1 , lowercase_ ):
        for row in range(lowercase_ ):
            A__ : List[str] = augmented[row][col] / augmented[col][col]

            for cola in range(lowercase_ , size + 1 ):
                augmented[row][cola] -= augmented[col][cola] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(lowercase_ )
    ]
def UpperCamelCase (lowercase_: list[int] ) -> Callable[[int], int]:
    """Build the polynomial interpolating the points (1, y1), (2, y2), ...

    Sets up a Vandermonde system from the data points, solves it, and returns
    a callable that evaluates the fitted polynomial at an integer argument.

    NOTE(review): the body reads ``size``/``x_val``/``y_val``/``coeffs``/
    ``var``/``solve`` through assignments discarded into the throwaway
    ``A__`` (and ``solve`` is not defined under that name in this module) --
    the original bindings need restoring.
    """
    A__ : int = len(lowercase_ )
    A__ : Matrix = [[0 for _ in range(lowercase_ )] for _ in range(lowercase_ )]
    A__ : Matrix = [[0] for _ in range(lowercase_ )]
    A__ : Matrix
    A__ : int
    A__ : int
    A__ : int
    for x_val, y_val in enumerate(lowercase_ ):
        for col in range(lowercase_ ):
            # Vandermonde entry: (x_val + 1) ** (size - col - 1)
            A__ : Dict = (x_val + 1) ** (size - col - 1)
        A__ : Any = y_val
    A__ : Union[str, Any] = solve(lowercase_ , lowercase_ )

    def interpolated_func(lowercase_: int ) -> int:
        # Evaluate sum_i round(coeffs[i]) * var ** (size - i - 1).
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(lowercase_ ) )

    return interpolated_func
def UpperCamelCase (lowercase_: int ) -> int:
    """Evaluate the Project Euler 101 generating polynomial.

    u(n) = 1 - n + n^2 - n^3 + ... + n^10 (alternating signs up to the tenth
    power). The previous revision evaluated the polynomial in an unbound name
    ``variable``; bind it to the parameter first.

    Args:
        lowercase_: the integer argument n.

    Returns:
        The value of the polynomial at n.
    """
    variable = lowercase_
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def UpperCamelCase (lowercase_: Callable[[int], int] = question_function , lowercase_: int = 10 ) -> int:
    """Project Euler 101: sum the first incorrect term (FIT) of each optimum
    polynomial fitted to successive prefixes of the sequence.

    NOTE(review): the two parameters share the name ``lowercase_``
    (a SyntaxError); the default ``question_function`` is evaluated at
    definition time but is not defined under that name in this module; and
    ``func``/``order``/``interpolate``/``data_points``/``polynomials``/
    ``x_val``/``ret`` are read without being bound -- the original
    identifiers need restoring.
    """
    A__ : list[int] = [func(lowercase_ ) for x_val in range(1 , order + 1 )]
    # One interpolating polynomial per prefix length 1..order.
    A__ : list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
    ]
    A__ : int = 0
    A__ : Callable[[int], int]
    A__ : int
    for poly in polynomials:
        # Find the first argument where the fitted polynomial diverges from
        # the true generating function and accumulate its (incorrect) value.
        A__ : List[str] = 1
        while func(lowercase_ ) == poly(lowercase_ ):
            x_val += 1
        ret += poly(lowercase_ )
    return ret
if __name__ == "__main__":
    # NOTE(review): ``solution`` is not defined in this module -- the solver
    # above is named ``UpperCamelCase``; confirm the intended entry point.
    print(f'''{solution() = }''')
| 64 | 0 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
# Emit INFO-level progress while converting checkpoints.
logging.set_verbosity_info()
A_ : int = logging.get_logger(__name__)  # module logger (not referenced elsewhere in this section)
def UpperCamelCase (lowercase_: Tuple ) -> str:
    """Load a fairseq/metaseq OPT checkpoint and normalise its state dict.

    Drops unused weights, renames projection/layer-norm keys to the HF
    layout, and splits every fused ``qkv_proj`` matrix into separate
    q/k/v projections. The previous revision discarded every intermediate
    into the throwaway local ``A__`` while reading ``sd``/``keys_to_delete``/
    ``keys_to_rename``/``q``/``k``/``v``/``depth``.

    Args:
        lowercase_: path (or file-like object) accepted by ``torch.load``.

    Returns:
        The converted state dict.
    """
    sd = torch.load(lowercase_ , map_location="""cpu""" )
    if "model" in sd.keys():
        # Wrapped checkpoint: the weights live under the "model" key.
        sd = sd["""model"""]

    # pop unnecessary weights
    keys_to_delete = [
        """decoder.version""",
        """decoder.output_projection.weight""",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key )

    keys_to_rename = {
        """decoder.project_in_dim.weight""": """decoder.project_in.weight""",
        """decoder.project_out_dim.weight""": """decoder.project_out.weight""",
        """decoder.layer_norm.weight""": """decoder.final_layer_norm.weight""",
        """decoder.layer_norm.bias""": """decoder.final_layer_norm.bias""",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key )

    keys = list(sd.keys() )
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(""".qkv_proj.""" , """.q_proj.""" )
            k_name = key.replace(""".qkv_proj.""" , """.k_proj.""" )
            v_name = key.replace(""".qkv_proj.""" , """.v_proj.""" )

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value , depth // 3 , dim=0 )

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
@torch.no_grad()
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: List[str] , lowercase_: Optional[Any]=None ) -> Any:
    """Convert a metaseq OPT checkpoint into a half-precision HF ``OPTModel``
    and save it to the output directory.

    NOTE(review): the three parameters all share the name ``lowercase_``
    (a SyntaxError) -- originally checkpoint path, output folder and optional
    config path -- and ``load_checkpoint``/``config``/``model`` are read while
    their values were discarded into the throwaway ``A__`` (the loader above
    is also named ``UpperCamelCase``, not ``load_checkpoint``).
    """
    A__ : Union[str, Any] = load_checkpoint(lowercase_ )

    if config is not None:
        A__ : Any = OPTConfig.from_pretrained(lowercase_ )
    else:
        A__ : List[str] = OPTConfig()

    A__ : Optional[Any] = OPTModel(lowercase_ ).half().eval()
    model.load_state_dict(lowercase_ )

    # Check results
    Path(lowercase_ ).mkdir(exist_ok=lowercase_ )

    model.save_pretrained(lowercase_ )
if __name__ == "__main__":
    # CLI entry point for the OPT checkpoint conversion.
    A_ : int = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--fairseq_path',
        type=str,
        help=(
            'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
            ' https://huggingface.co/models?other=opt_metasq'
        ),
    )
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
    A_ : Dict = parser.parse_args()
    # NOTE(review): ``parser``/``args``/``convert_opt_checkpoint`` are not
    # bound in this module (the parser and parsed args are assigned to the
    # throwaway ``A_`` above, and the converter is named ``UpperCamelCase``)
    # -- confirm the intended names.
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 714 |
from functools import lru_cache
@lru_cache
def UpperCamelCase (lowercase_: int ) -> int:
if num < 0:
raise ValueError("""Number should not be negative.""" )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
    # Run the docstring examples (if any) as a smoke test.
    import doctest

    doctest.testmod()
| 64 | 0 |
class _a :
    """Dynamic-programming edit (Levenshtein) distance solver.

    Fixes over the previous revision: results are stored on ``self`` instead
    of being discarded into throwaway locals, the two public entry points now
    carry the names the ``__main__`` driver calls
    (``min_dist_top_down`` / ``min_dist_bottom_up``) instead of three methods
    that all shadowed each other under the name ``__A``, and the top-down
    recursion compares characters of the *two* words rather than the first
    word against itself.
    """

    def __init__( self ):
        self.worda: str = ""
        self.wordb: str = ""
        # DP table; semantics differ per entry point (memo vs. full table).
        self.dp: list[list[int]] = []

    def __min_dist_top_down_dp( self , m: int , n: int ) -> int:
        # Memoised recursion over suffix end-indices; -1 marks "unsolved".
        if m == -1:
            return n + 1  # first word exhausted: insert the remaining n+1 chars
        elif n == -1:
            return m + 1  # second word exhausted: delete the remaining m+1 chars
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.worda[m] == self.wordb[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
            else:
                insert = self.__min_dist_top_down_dp(m , n - 1 )
                delete = self.__min_dist_top_down_dp(m - 1 , n )
                replace = self.__min_dist_top_down_dp(m - 1 , n - 1 )
                self.dp[m][n] = 1 + min(insert , delete , replace )
            return self.dp[m][n]

    def min_dist_top_down( self , worda: str , wordb: str ) -> int:
        """Return the edit distance between *worda* and *wordb* (top-down DP)."""
        self.worda = worda
        self.wordb = wordb
        self.dp = [[-1 for _ in range(len(wordb ) )] for _ in range(len(worda ) )]
        return self.__min_dist_top_down_dp(len(worda ) - 1 , len(wordb ) - 1 )

    def min_dist_bottom_up( self , worda: str , wordb: str ) -> int:
        """Return the edit distance between *worda* and *wordb* (bottom-up DP)."""
        self.worda = worda
        self.wordb = wordb
        m = len(worda )
        n = len(wordb )
        # dp[i][j] = distance between worda[:i] and wordb[:j].
        self.dp = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
        for i in range(m + 1 ):
            for j in range(n + 1 ):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif worda[i - 1] == wordb[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert , delete , replace )
        return self.dp[m][n]


# Public alias used by the interactive driver below.
EditDistance = _a
if __name__ == "__main__":
    # Interactive driver: read two strings and report the edit distance
    # computed by both DP strategies (they must agree). The previous revision
    # read names (``solver``, ``Sa``) that were never bound and passed the
    # first string twice.
    solver = EditDistance()

    print('****************** Testing Edit Distance DP Algorithm ******************')
    print()

    Sa : str = input('Enter the first string: ').strip()
    Sb : str = input('Enter the second string: ').strip()

    print()
    print(f'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sb)}''')
    print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sb)}''')
    print()
    print('*************** End of Testing Edit Distance DP Algorithm ***************')
| 715 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class _a (datasets.BeamBasedBuilder ):
    """Minimal Beam-based builder with a single string "content" column,
    used to exercise the Beam builder pipeline in the tests below.

    NOTE(review): the three methods all share the name ``__A`` (later
    definitions shadow earlier ones) and reference unbound names (``A__`` in
    ``supervised_keys``, ``pipeline``) -- presumably they were
    ``_info``/``_split_generators``/``_build_pcollection`` originally.
    """

    def __A ( self ):
        # Dataset metadata: one string column.
        return datasets.DatasetInfo(
            features=datasets.Features({"""content""": datasets.Value("""string""" )} ) , supervised_keys=A__ , )

    def __A ( self , A__ , A__ ):
        # Single TRAIN split fed from the in-memory dummy examples.
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_dummy_examples()} )]

    def __A ( self , A__ , A__ ):
        # Materialise the examples into the Beam pipeline.
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(A__ )
class _a (datasets.BeamBasedBuilder ):
    """Beam-based builder with a nested feature ({"a": Sequence({"b": str})}),
    used to exercise nested-feature handling in the Beam pipeline tests.

    NOTE(review): the three methods all share the name ``__A`` (later
    definitions shadow earlier ones) and reference unbound names (``A__`` in
    ``supervised_keys``, ``pipeline``) -- presumably they were
    ``_info``/``_split_generators``/``_build_pcollection`` originally.
    """

    def __A ( self ):
        # Dataset metadata: nested sequence-of-struct feature.
        return datasets.DatasetInfo(
            features=datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) , supervised_keys=A__ , )

    def __A ( self , A__ , A__ ):
        # Single TRAIN split fed from the in-memory nested examples.
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_nested_examples()} )
        ]

    def __A ( self , A__ , A__ ):
        # Materialise the examples into the Beam pipeline.
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(A__ )
def UpperCamelCase () -> Dict:
    """Return (index, {"content": value}) rows for the dummy Beam dataset."""
    rows = []
    for index, text in enumerate(["""foo""", """bar""", """foobar"""] ):
        rows.append((index, {"content": text}) )
    return rows
def UpperCamelCase () -> Tuple:
    """Return (index, {"a": {"b": [value]}}) rows for the nested Beam dataset."""
    rows = []
    for index, text in enumerate(["""foo""", """bar""", """foobar"""] ):
        rows.append((index, {"a": {"b": [text]}}) )
    return rows
class _a (__magic_name__ ):
    """End-to-end tests for the Beam-based dataset builders above: flat and
    nested features, sharded parquet writes, and the missing-runner error.

    NOTE(review): the four test methods all share the name ``__A`` (later
    definitions shadow earlier ones), and several names (``builder``,
    ``dset``, ``write_parquet_mock`` target, ``expected_num_examples``,
    the temporary cache dir passed as ``A__``) are read after their values
    were discarded into the throwaway ``A__`` local -- the original bindings
    need restoring before these tests can run.
    """

    @require_beam
    def __A ( self ):
        # Build the flat dummy dataset with the DirectRunner and verify the
        # arrow file, features, row counts, contents and dataset_info.json.
        A__ : Dict = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            A__ : int = DummyBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
            A__ : int = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows , A__ )
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
            self.assertDictEqual(dset["""train"""][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset["""train"""][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
            del dset

    @require_beam
    def __A ( self ):
        # Patch WriteToParquet to force num_shards=2 and verify sharded output.
        import apache_beam as beam

        A__ : int = beam.io.parquetio.WriteToParquet

        A__ : List[str] = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            A__ : str = DummyBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
            with patch("""apache_beam.io.parquetio.WriteToParquet""" ) as write_parquet_mock:
                A__ : Optional[Any] = partial(A__ , num_shards=2 )
                builder.download_and_prepare()
                self.assertTrue(
                    os.path.exists(
                        os.path.join(
                            A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
                self.assertTrue(
                    os.path.exists(
                        os.path.join(
                            A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
                self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
                A__ : Optional[int] = builder.as_dataset()
                self.assertEqual(dset["""train"""].num_rows , A__ )
                self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
                # Order is not preserved when sharding, so we just check that all the elements are there
                self.assertListEqual(sorted(dset["""train"""]["""content"""] ) , sorted(["""foo""", """bar""", """foobar"""] ) )
                self.assertTrue(
                    os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
                del dset

    @require_beam
    def __A ( self ):
        # Without a beam_runner the builder must refuse to prepare.
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            A__ : int = DummyBeamDataset(cache_dir=A__ )
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )

    @require_beam
    def __A ( self ):
        # Same end-to-end flow for the nested-feature dataset.
        A__ : List[Any] = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            A__ : Optional[int] = NestedBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) )
            A__ : Optional[int] = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows , A__ )
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
            self.assertDictEqual(dset["""train"""][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset["""train"""][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
            del dset
| 64 | 0 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def UpperCamelCase (lowercase_: Dict , lowercase_: Union[str, Any] ) -> Union[str, Any]:
    """Shared assertions for a 4-row, 3-column JSON-backed Dataset.

    NOTE(review): both parameters are named ``lowercase_`` (a SyntaxError);
    the body reads ``dataset`` and ``expected_features`` -- the original
    parameter names need restoring.
    """
    assert isinstance(lowercase_ , lowercase_ )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCamelCase (lowercase_: Dict , lowercase_: Optional[int] , lowercase_: Tuple ) -> Optional[int]:
    """Read a JSONL file into a Dataset with/without keep_in_memory and check
    the expected Arrow-memory behaviour plus the resulting schema."""
    # NOTE(review): duplicate ``lowercase_`` parameters -- restore the original names.
    A__ : Dict = tmp_path / """cache"""
    A__ : str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        A__ : Union[str, Any] = JsonDatasetReader(lowercase_ , cache_dir=lowercase_ , keep_in_memory=lowercase_ ).read()
    _check_json_dataset(lowercase_ , lowercase_ )
@pytest.mark.parametrize(
    """features""" , [
        None,
        {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
        {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
        {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
        {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
    ] , )
def UpperCamelCase (lowercase_: int , lowercase_: Optional[int] , lowercase_: List[str] ) -> int:
    """Read a JSONL file with an explicit ``features`` schema (or None) and
    verify the resulting Dataset honours it."""
    # NOTE(review): duplicate ``lowercase_`` parameters -- restore the original names.
    A__ : Union[str, Any] = tmp_path / """cache"""
    A__ : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    A__ : Any = features.copy() if features else default_expected_features
    A__ : List[Any] = (
        Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None
    )
    A__ : List[Any] = JsonDatasetReader(lowercase_ , features=lowercase_ , cache_dir=lowercase_ ).read()
    _check_json_dataset(lowercase_ , lowercase_ )
@pytest.mark.parametrize(
    """features""" , [
        None,
        {"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""},
    ] , )
def UpperCamelCase (lowercase_: str , lowercase_: List[Any] , lowercase_: List[str] ) -> Dict:
    """Read a JSONL file whose columns are stored in a non-default order and
    check the column order is preserved in the Dataset."""
    # NOTE(review): duplicate ``lowercase_`` parameters -- restore the original names.
    A__ : Optional[int] = tmp_path / """cache"""
    A__ : Optional[int] = {"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""}
    A__ : Optional[int] = features.copy() if features else default_expected_features
    A__ : int = (
        Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None
    )
    A__ : Union[str, Any] = JsonDatasetReader(lowercase_ , features=lowercase_ , cache_dir=lowercase_ ).read()
    assert isinstance(lowercase_ , lowercase_ )
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def UpperCamelCase (lowercase_: int , lowercase_: Tuple ) -> int:
    """Reorder columns via an explicit features schema whose order differs
    from the file's and verify the Dataset follows the schema order."""
    # NOTE(review): duplicate ``lowercase_`` parameters; ``features`` is read
    # before any binding here -- restore the original identifiers.
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    A__ : str = {"""col_2""": """int64""", """col_3""": """float64""", """col_1""": """string"""}
    A__ : Optional[int] = features.copy()
    A__ : Optional[int] = (
        Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None
    )
    A__ : Union[str, Any] = tmp_path / """cache"""
    A__ : Dict = JsonDatasetReader(lowercase_ , features=lowercase_ , cache_dir=lowercase_ ).read()
    assert isinstance(lowercase_ , lowercase_ )
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCamelCase (lowercase_: Dict , lowercase_: Dict , lowercase_: Optional[Any] ) -> Optional[Any]:
    """Read a JSONL file into a specific split and verify ``dataset.split``."""
    # NOTE(review): duplicate ``lowercase_`` parameters -- restore the original names.
    A__ : Union[str, Any] = tmp_path / """cache"""
    A__ : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    A__ : List[str] = JsonDatasetReader(lowercase_ , cache_dir=lowercase_ , split=lowercase_ ).read()
    _check_json_dataset(lowercase_ , lowercase_ )
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def UpperCamelCase (lowercase_: str , lowercase_: Union[str, Any] , lowercase_: List[str] ) -> List[Any]:
    """Accept the JSONL source as a single path or a list of paths."""
    # NOTE(review): duplicate ``lowercase_`` parameters; the branches read
    # ``jsonl_path`` through the fixture -- restore the original names.
    if issubclass(lowercase_ , lowercase_ ):
        A__ : Optional[int] = jsonl_path
    elif issubclass(lowercase_ , lowercase_ ):
        A__ : Tuple = [jsonl_path]
    A__ : int = tmp_path / """cache"""
    A__ : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    A__ : Tuple = JsonDatasetReader(lowercase_ , cache_dir=lowercase_ ).read()
    _check_json_dataset(lowercase_ , lowercase_ )
def UpperCamelCase (lowercase_: int , lowercase_: Dict , lowercase_: Any=("train",) ) -> Optional[int]:
    """Shared assertions for a DatasetDict: each listed split has 4 rows and
    the expected 3-column schema."""
    # NOTE(review): duplicate ``lowercase_`` parameters; the body reads
    # ``splits``/``dataset_dict``/``expected_features`` -- original names.
    assert isinstance(lowercase_ , lowercase_ )
    for split in splits:
        A__ : Optional[Any] = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCamelCase (lowercase_: Optional[Any] , lowercase_: Any , lowercase_: int ) -> Dict:
    """Read a {"train": path} mapping into a DatasetDict with/without
    keep_in_memory and verify memory behaviour plus schema."""
    # NOTE(review): duplicate ``lowercase_`` parameters -- restore the original names.
    A__ : int = tmp_path / """cache"""
    A__ : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        A__ : List[str] = JsonDatasetReader({"""train""": jsonl_path} , cache_dir=lowercase_ , keep_in_memory=lowercase_ ).read()
    _check_json_datasetdict(lowercase_ , lowercase_ )
@pytest.mark.parametrize(
    """features""" , [
        None,
        {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
        {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
        {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
        {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
    ] , )
def UpperCamelCase (lowercase_: str , lowercase_: Dict , lowercase_: Optional[int] ) -> int:
    """Read a ``{"train": path}`` mapping with an optional explicit ``Features``
    schema and verify the resulting dtypes.

    NOTE(review): duplicated ``lowercase_`` parameters (SyntaxError) —
    machine-mangled; presumably ``(features, jsonl_path, tmp_path)``.
    """
    A__ : List[Any] = tmp_path / """cache"""
    # Default dtypes apply when no explicit features are requested.
    A__ : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    A__ : str = features.copy() if features else default_expected_features
    A__ : Optional[int] = (
        Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None
    )
    A__ : str = JsonDatasetReader({"""train""": jsonl_path} , features=lowercase_ , cache_dir=lowercase_ ).read()
    _check_json_datasetdict(lowercase_ , lowercase_ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: Any , lowercase_: int ) -> List[str]:
    """Read a dict of split->path and check each returned split is named after
    its key (defaulting to train/test when ``split`` is None).

    NOTE(review): duplicated ``lowercase_`` parameters (SyntaxError) —
    machine-mangled; presumably ``(split, jsonl_path, tmp_path)``.  ``path``
    referenced below is otherwise undefined — confirm against the original.
    """
    if split:
        A__ : List[Any] = {split: jsonl_path}
    else:
        A__ : List[str] = """train"""
        A__ : Optional[Any] = {"""train""": jsonl_path, """test""": jsonl_path}
    A__ : Any = tmp_path / """cache"""
    A__ : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    A__ : List[Any] = JsonDatasetReader(lowercase_ , cache_dir=lowercase_ ).read()
    _check_json_datasetdict(lowercase_ , lowercase_ , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
def UpperCamelCase (lowercase_: Optional[int] ) -> Optional[Any]:
return json.load(lowercase_ )
def UpperCamelCase (lowercase_: Union[str, Any] ) -> Optional[int]:
return [json.loads(lowercase_ ) for line in buffer]
class _a :
    """Tests for ``JsonDatasetWriter``: round-trips through JSON lines and the
    pandas ``orient`` layouts, multiprocess writing, and compressed output.

    NOTE(review): several methods declare duplicated ``A__`` parameters (a
    SyntaxError) — this file looks machine-mangled; the parameter names in the
    ``parametrize`` decorators indicate the intended signatures.
    """

    # Write a 10-row dataset as JSON (lines or whole-document) and reload it.
    @pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
    def __A ( self , A__ , A__ , A__ ):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(A__ , A__ , lines=A__ ).write()
            buffer.seek(0 )
            A__ : Union[str, Any] = load_json_function(A__ )
            assert isinstance(A__ , A__ )
            assert isinstance(exported_content[0] , A__ )
            assert len(A__ ) == 10
    # Check each pandas ``orient`` layout produces the expected container,
    # top-level keys and element counts.
    @pytest.mark.parametrize(
        """orient, container, keys, len_at""" , [
            ("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
            ("""split""", dict, {"""columns""", """data"""}, """data"""),
            ("""index""", dict, set("""0123456789""" ), None),
            ("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
            ("""values""", list, None, None),
            ("""table""", dict, {"""schema""", """data"""}, """data"""),
        ] , )
    def __A ( self , A__ , A__ , A__ , A__ , A__ ):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(A__ , A__ , lines=A__ , orient=A__ ).write()
            buffer.seek(0 )
            A__ : Any = load_json(A__ )
            assert isinstance(A__ , A__ )
            if keys:
                if container is dict:
                    assert exported_content.keys() == keys
                else:
                    assert exported_content[0].keys() == keys
            else:
                assert not hasattr(A__ , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
            if len_at:
                assert len(exported_content[len_at] ) == 10
            else:
                assert len(A__ ) == 10
    # Same round-trip as above, but writing with two worker processes.
    @pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
    def __A ( self , A__ , A__ , A__ ):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(A__ , A__ , lines=A__ , num_proc=2 ).write()
            buffer.seek(0 )
            A__ : List[str] = load_json_function(A__ )
            assert isinstance(A__ , A__ )
            assert isinstance(exported_content[0] , A__ )
            assert len(A__ ) == 10
    # Orient layouts with two worker processes.
    @pytest.mark.parametrize(
        """orient, container, keys, len_at""" , [
            ("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
            ("""split""", dict, {"""columns""", """data"""}, """data"""),
            ("""index""", dict, set("""0123456789""" ), None),
            ("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
            ("""values""", list, None, None),
            ("""table""", dict, {"""schema""", """data"""}, """data"""),
        ] , )
    def __A ( self , A__ , A__ , A__ , A__ , A__ ):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(A__ , A__ , lines=A__ , orient=A__ , num_proc=2 ).write()
            buffer.seek(0 )
            A__ : List[Any] = load_json(A__ )
            assert isinstance(A__ , A__ )
            if keys:
                if container is dict:
                    assert exported_content.keys() == keys
                else:
                    assert exported_content[0].keys() == keys
            else:
                assert not hasattr(A__ , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
            if len_at:
                assert len(exported_content[len_at] ) == 10
            else:
                assert len(A__ ) == 10
    # ``num_proc=0`` is invalid and must raise.
    def __A ( self , A__ ):
        with pytest.raises(A__ ):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(A__ , A__ , num_proc=0 )
    # Writing with compression must reproduce the reference compressed file.
    @pytest.mark.parametrize("""compression, extension""" , [("""gzip""", """gz"""), ("""bz2""", """bz2"""), ("""xz""", """xz""")] )
    def __A ( self , A__ , A__ , A__ , A__ , A__ ):
        A__ : Dict = tmp_path_factory.mktemp("""data""" ) / F"""test.json.{extension}"""
        A__ : Optional[int] = str(shared_datadir / F"""test_file.json.{extension}""" )
        JsonDatasetWriter(A__ , A__ , compression=A__ ).write()
        with fsspec.open(A__ , """rb""" , compression="""infer""" ) as f:
            A__ : Dict = f.read()
        with fsspec.open(A__ , """rb""" , compression="""infer""" ) as f:
            A__ : Optional[Any] = f.read()
        assert exported_content == original_content
| 716 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
# Module-level logger for the deprecation shim below.
A_ : Union[str, Any] = logging.get_logger(__name__)
class _a (__magic_name__ ):
    """Deprecated alias of ``PoolFormerImageProcessor``.

    Kept for backward compatibility: emits a ``FutureWarning`` on
    instantiation, then defers entirely to the parent class.
    """

    def __init__( self , *args , **kwargs ):
        # BUG FIX: the original declared ``*A__, **A__`` (duplicate parameter
        # name -> SyntaxError) and passed that name where ``warnings.warn``
        # expects the warning category.  Use distinct names and the
        # conventional FutureWarning for deprecations.
        warnings.warn(
            """The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use PoolFormerImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 64 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class _a (unittest.TestCase ):
    """Fixture holder that builds the config dict for ``DonutImageProcessor``
    tests.

    NOTE(review): ``__init__`` declares many duplicated ``A__`` parameters (a
    SyntaxError) — machine-mangled; the attribute assignments indicate the
    intended parameter names (parent, batch_size, num_channels, ...).
    """

    def __init__( self , A__ , A__=7 , A__=3 , A__=18 , A__=30 , A__=400 , A__=True , A__=None , A__=True , A__=False , A__=True , A__=True , A__=[0.5, 0.5, 0.5] , A__=[0.5, 0.5, 0.5] , ):
        A__ : int = parent
        A__ : Optional[Any] = batch_size
        A__ : Tuple = num_channels
        A__ : Dict = image_size
        A__ : str = min_resolution
        A__ : List[str] = max_resolution
        A__ : Tuple = do_resize
        # Default target size when none is supplied.
        A__ : Tuple = size if size is not None else {"""height""": 18, """width""": 20}
        A__ : Optional[Any] = do_thumbnail
        A__ : int = do_align_axis
        A__ : List[str] = do_pad
        A__ : List[str] = do_normalize
        A__ : Any = image_mean
        A__ : List[Any] = image_std
    # Build the kwargs dict consumed by ``DonutImageProcessor``.
    def __A ( self ):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class _a (__magic_name__ , unittest.TestCase ):
    """Tests for ``DonutImageProcessor``: property presence, size parsing, and
    batched/unbatched preprocessing of PIL, numpy and torch inputs."""

    UpperCAmelCase__: Tuple = DonutImageProcessor if is_vision_available() else None
    def __A ( self ):
        A__ : Optional[int] = DonutImageProcessingTester(self )
    # kwargs dict forwarded to the image processor under test.
    @property
    def __A ( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    # The processor must expose every documented configuration attribute.
    def __A ( self ):
        A__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(A__ , """do_resize""" ) )
        self.assertTrue(hasattr(A__ , """size""" ) )
        self.assertTrue(hasattr(A__ , """do_thumbnail""" ) )
        self.assertTrue(hasattr(A__ , """do_align_long_axis""" ) )
        self.assertTrue(hasattr(A__ , """do_pad""" ) )
        self.assertTrue(hasattr(A__ , """do_normalize""" ) )
        self.assertTrue(hasattr(A__ , """image_mean""" ) )
        self.assertTrue(hasattr(A__ , """image_std""" ) )
    # ``size`` may be a dict, an int, or a legacy (width, height) tuple.
    def __A ( self ):
        A__ : str = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""height""": 18, """width""": 20} )
        A__ : int = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
        # Previous config had dimensions in (width, height) order
        A__ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
        self.assertEqual(image_processor.size , {"""height""": 84, """width""": 42} )
    def __A ( self ):
        pass
    # Preprocessing of PIL images, single and batched.
    @is_flaky()
    def __A ( self ):
        # Initialize image_processing
        A__ : int = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        A__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ )
        for image in image_inputs:
            self.assertIsInstance(A__ , Image.Image )
        # Test not batched input
        A__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        A__ : Union[str, Any] = image_processing(A__ , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
    # Preprocessing of numpy arrays, single and batched.
    @is_flaky()
    def __A ( self ):
        # Initialize image_processing
        A__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        A__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , numpify=A__ )
        for image in image_inputs:
            self.assertIsInstance(A__ , np.ndarray )
        # Test not batched input
        A__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        A__ : Optional[int] = image_processing(A__ , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
    # Preprocessing of torch tensors, single and batched.
    @is_flaky()
    def __A ( self ):
        # Initialize image_processing
        A__ : Any = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        A__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__ )
        for image in image_inputs:
            self.assertIsInstance(A__ , torch.Tensor )
        # Test not batched input
        A__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        A__ : List[Any] = image_processing(A__ , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
| 717 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
# Module-level logger for the checkpointing tests below.
A_ : Any = logging.getLogger(__name__)
def UpperCamelCase (lowercase_: Optional[Any]=2 , lowercase_: Union[str, Any]=3 , lowercase_: int=16 , lowercase_: int = 10 , lowercase_: int = 2 ) -> int:
    """Build (train, valid) DataLoaders over a synthetic ``y = a*x + b + noise``
    regression dataset.

    NOTE(review): duplicated ``lowercase_`` parameters (SyntaxError) —
    machine-mangled; presumably ``(a, b, batch_size, n_train_batches,
    n_valid_batches)``.  ``batch_size``, ``n_batches``, ``a``, ``b``, ``x``,
    ``train_dataloader`` and ``valid_dataloader`` below are otherwise
    undefined — confirm against the original source.
    """
    def get_dataset(lowercase_: Optional[int] ):
        A__ : Optional[Any] = torch.randn(batch_size * n_batches , 1 )
        return TensorDataset(lowercase_ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
    A__ : Dict = get_dataset(lowercase_ )
    A__ : Any = get_dataset(lowercase_ )
    A__ : Dict = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 )
    A__ : Optional[Any] = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 )
    return (train_dataloader, valid_dataloader)
def UpperCamelCase (lowercase_: Optional[Any] , lowercase_: List[str] , lowercase_: int , lowercase_: int , lowercase_: List[str] , lowercase_: Dict=None ) -> List[Any]:
    """Run a short MSE training loop and return the random values drawn per
    batch (used to compare RNG state across checkpoint save/load).

    NOTE(review): duplicated ``lowercase_`` parameters (SyntaxError) —
    machine-mangled; presumably ``(num_epochs, model, dataloader, optimizer,
    accelerator, scheduler)``.  ``model``, ``dataloader``, ``rands`` etc.
    below are otherwise undefined — confirm against the original source.
    """
    A__ : List[Any] = []
    for epoch in range(lowercase_ ):
        # Train quickly
        model.train()
        for batch in dataloader:
            A__ , A__ : Any = batch
            A__ : Any = model(lowercase_ )
            A__ : Any = torch.nn.functional.mse_loss(lowercase_ , lowercase_ )
            accelerator.backward(lowercase_ )
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random() ) # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class _a (nn.Module ):
'''simple docstring'''
def __init__( self ):
super().__init__()
A__ : str = nn.Parameter(torch.randn(1 ) )
A__ : Any = nn.Parameter(torch.randn(1 ) )
def __A ( self , A__ ):
return x * self.a + self.b
class _a (unittest.TestCase ):
    """End-to-end tests of ``accelerate`` state checkpointing: save/load of
    model, optimizer, dataloader RNG and scheduler state, checkpoint
    rotation via ``total_limit``, and validation of registered objects."""

    # ``total_limit=1`` with automatic naming keeps only the newest checkpoint.
    def __A ( self ):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : Optional[Any] = DummyModel()
            A__ : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ , A__ : str = dummy_dataloaders()
            A__ : Dict = ProjectConfiguration(total_limit=1 , project_dir=A__ , automatic_checkpoint_naming=A__ )
            # Train baseline
            A__ : List[str] = Accelerator(project_config=A__ )
            A__ , A__ , A__ , A__ : Any = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
    # Saving to explicit paths and reloading must resume training exactly
    # (same weights, optimizer state, and RNG-driven "rands" sequence).
    def __A ( self ):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : str = DummyModel()
            A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ , A__ : int = dummy_dataloaders()
            # Train baseline
            A__ : str = Accelerator()
            A__ , A__ , A__ , A__ : List[str] = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            # Save initial
            A__ : List[Any] = os.path.join(A__ , """initial""" )
            accelerator.save_state(A__ )
            ((A__) , (A__)) : str = model.a.item(), model.b.item()
            A__ : Dict = optimizer.state_dict()
            A__ : List[str] = train(3 , A__ , A__ , A__ , A__ )
            ((A__) , (A__)) : str = model.a.item(), model.b.item()
            A__ : Any = optimizer.state_dict()
            # Train partially
            set_seed(42 )
            A__ : Optional[int] = DummyModel()
            A__ : Dict = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ , A__ : Dict = dummy_dataloaders()
            A__ : List[str] = Accelerator()
            A__ , A__ , A__ , A__ : Optional[Any] = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            accelerator.load_state(A__ )
            ((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
            A__ : Union[str, Any] = optimizer.state_dict()
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            A__ : List[str] = train(2 , A__ , A__ , A__ , A__ )
            # Save everything
            A__ : Optional[int] = os.path.join(A__ , """checkpoint""" )
            accelerator.save_state(A__ )
            # Load everything back in and make sure all states work
            accelerator.load_state(A__ )
            test_rands += train(1 , A__ , A__ , A__ , A__ )
            ((A__) , (A__)) : Union[str, Any] = model.a.item(), model.b.item()
            A__ : Optional[int] = optimizer.state_dict()
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
    # Same resume test, but with automatic checkpoint naming/paths.
    def __A ( self ):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : int = DummyModel()
            A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ , A__ : List[str] = dummy_dataloaders()
            A__ : str = ProjectConfiguration(automatic_checkpoint_naming=A__ )
            # Train baseline
            A__ : Any = Accelerator(project_dir=A__ , project_config=A__ )
            A__ , A__ , A__ , A__ : str = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            # Save initial
            accelerator.save_state()
            ((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
            A__ : int = optimizer.state_dict()
            A__ : int = train(3 , A__ , A__ , A__ , A__ )
            ((A__) , (A__)) : Optional[Any] = model.a.item(), model.b.item()
            A__ : Any = optimizer.state_dict()
            # Train partially
            set_seed(42 )
            A__ : Dict = DummyModel()
            A__ : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ , A__ : Union[str, Any] = dummy_dataloaders()
            A__ : List[Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A__ )
            A__ : Dict = Accelerator(project_dir=A__ , project_config=A__ )
            A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
            ((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
            A__ : Tuple = optimizer.state_dict()
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            A__ : str = train(2 , A__ , A__ , A__ , A__ )
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_1""" ) )
            test_rands += train(1 , A__ , A__ , A__ , A__ )
            ((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
            A__ : List[Any] = optimizer.state_dict()
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
    # Plain tensors are not checkpointable: registration must name the
    # offending items (indices 0 and 1) but not the model/optimizer.
    def __A ( self ):
        A__ : Union[str, Any] = torch.tensor([1, 2, 3] )
        A__ : int = torch.tensor([2, 3, 4] )
        A__ : List[Any] = DummyModel()
        A__ : List[Any] = torch.optim.Adam(net.parameters() )
        A__ : Tuple = Accelerator()
        with self.assertRaises(A__ ) as ve:
            accelerator.register_for_checkpointing(A__ , A__ , A__ , A__ )
        A__ : Any = str(ve.exception )
        self.assertTrue("""Item at index 0""" in message )
        self.assertTrue("""Item at index 1""" in message )
        self.assertFalse("""Item at index 2""" in message )
        self.assertFalse("""Item at index 3""" in message )
    # LR-scheduler state must round-trip through save_state/load_state.
    def __A ( self ):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : Any = DummyModel()
            A__ : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ : Dict = torch.optim.lr_scheduler.StepLR(A__ , step_size=1 , gamma=0.9_9 )
            A__ , A__ : List[Any] = dummy_dataloaders()
            A__ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=A__ )
            # Train baseline
            A__ : Optional[Any] = Accelerator(project_dir=A__ , project_config=A__ )
            A__ , A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
                A__ , A__ , A__ , A__ , A__ )
            # Save initial
            accelerator.save_state()
            A__ : Tuple = scheduler.state_dict()
            train(3 , A__ , A__ , A__ , A__ , A__ )
            self.assertNotEqual(A__ , scheduler.state_dict() )
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
            self.assertEqual(A__ , scheduler.state_dict() )
    # With ``total_limit=2`` only the two most recent checkpoints survive.
    def __A ( self ):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : Optional[Any] = DummyModel()
            A__ : int = ProjectConfiguration(automatic_checkpoint_naming=A__ , total_limit=2 )
            # Train baseline
            A__ : List[str] = Accelerator(project_dir=A__ , project_config=A__ )
            A__ : Union[str, Any] = accelerator.prepare(A__ )
            # Save 3 states:
            for _ in range(11 ):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) ) )
            self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_9""" ) ) )
            self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_10""" ) ) )
    # Re-launch this module under torchrun to exercise the multi-GPU path
    # (the ``__main__`` block below).
    @require_cuda
    def __A ( self ):
        A__ : Dict = ["""torchrun""", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
        execute_subprocess_async(A__ , env=os.environ.copy() )
if __name__ == "__main__":
    # Distributed smoke test (launched via torchrun by the test above):
    # checks optimizer states land on the right device after load_state
    # with the different ``map_location`` options.
    A_ : List[str] = '/tmp/accelerate/state_checkpointing'
    A_ : Optional[Any] = DummyModel()
    A_ : Union[str, Any] = torch.optim.Adam(params=model.parameters(), lr=1E-3)
    A_ : str = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    A_ , A_ : List[Any] = dummy_dataloaders()
    A_ : int = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    A_ : List[str] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    A_ , A_ , A_ , A_ , A_ : List[Any] = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    A_ , A_ : Dict = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        A_ : str = group['params'][0].device
        break
    assert param_device.type == accelerator.device.type
    A_ : Optional[Any] = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()
    # Check CPU state
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
    for group in optimizer.param_groups:
        A_ : str = group['params'][0].device
        break
    assert (
        param_device.type == torch.device('cpu').type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
    for group in optimizer.param_groups:
        A_ : Tuple = group['params'][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
    # Check error
    with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
        accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
| 64 | 0 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
# Make the flax example scripts importable as modules.
# NOTE(review): the list is assigned to ``A_`` (machine-mangled) but consumed
# below as ``SRC_DIRS``, which is otherwise undefined — confirm against the
# original source.
A_ : Dict = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        'text-classification',
        'language-modeling',
        'summarization',
        'token-classification',
        'question-answering',
    ]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
A_ : Optional[Any] = logging.getLogger()
def UpperCamelCase () -> Optional[Any]:
A__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""-f""" )
A__ : Tuple = parser.parse_args()
return args.f
def UpperCamelCase (lowercase_: Optional[int] , lowercase_: int="eval" ) -> Dict:
    """Load ``{split}_results.json`` from an output directory and return the
    parsed metrics dict; raise if the file does not exist.

    NOTE(review): duplicated ``lowercase_`` parameters (SyntaxError) —
    machine-mangled; presumably ``(output_dir, split)``.  ``split`` and
    ``path`` in the f-strings are otherwise undefined — confirm against the
    original source.
    """
    A__ : List[Any] = os.path.join(lowercase_ , f"""{split}_results.json""" )
    if os.path.exists(lowercase_ ):
        with open(lowercase_ , """r""" ) as f:
            return json.load(lowercase_ )
    raise ValueError(f"""can't find {path}""" )
# Mirror example-script logs to stdout so pytest captures them.
# NOTE(review): the handler is assigned to ``A_`` (machine-mangled) but used
# below as ``stream_handler``; ``logger`` presumably refers to the root
# logger assigned above — confirm against the original source.
A_ : str = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _a (__magic_name__ ):
    """Smoke tests for the flax example scripts: each test builds a CLI
    argument list, patches ``sys.argv``, runs the script's ``main()`` on tiny
    fixtures, and checks the resulting metrics clear a loose threshold.

    NOTE(review): each method patches ``A__`` into ``sys.argv`` — the first
    positional argument presumably stands in for the ``sys`` module
    (machine-mangled); confirm against the original source.
    """

    # GLUE text classification (run_flax_glue).
    def __A ( self ):
        A__ : Tuple = self.get_auto_remove_tmp_dir()
        A__ : str = F"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --eval_steps=2
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()
        with patch.object(A__ , """argv""" , A__ ):
            run_flax_glue.main()
            A__ : Tuple = get_results(A__ )
            self.assertGreaterEqual(result["""eval_accuracy"""] , 0.7_5 )
    # Causal language modeling (run_clm_flax).
    @slow
    def __A ( self ):
        A__ : List[str] = self.get_auto_remove_tmp_dir()
        A__ : Tuple = F"""
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()
        with patch.object(A__ , """argv""" , A__ ):
            run_clm_flax.main()
            A__ : int = get_results(A__ )
            self.assertLess(result["""eval_perplexity"""] , 100 )
    # Summarization (run_summarization_flax) with ROUGE thresholds.
    @slow
    def __A ( self ):
        A__ : Tuple = self.get_auto_remove_tmp_dir()
        A__ : Union[str, Any] = F"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            """.split()
        with patch.object(A__ , """argv""" , A__ ):
            run_summarization_flax.main()
            A__ : List[Any] = get_results(A__ , split="""test""" )
            self.assertGreaterEqual(result["""test_rouge1"""] , 10 )
            self.assertGreaterEqual(result["""test_rouge2"""] , 2 )
            self.assertGreaterEqual(result["""test_rougeL"""] , 7 )
            self.assertGreaterEqual(result["""test_rougeLsum"""] , 7 )
    # Masked language modeling (run_mlm_flax).
    @slow
    def __A ( self ):
        A__ : int = self.get_auto_remove_tmp_dir()
        A__ : int = F"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
            """.split()
        with patch.object(A__ , """argv""" , A__ ):
            run_mlm_flax.main()
            A__ : str = get_results(A__ )
            self.assertLess(result["""eval_perplexity"""] , 42 )
    # T5 span-corruption pretraining (run_t5_mlm_flax).
    @slow
    def __A ( self ):
        A__ : List[str] = self.get_auto_remove_tmp_dir()
        A__ : Union[str, Any] = F"""
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()
        with patch.object(A__ , """argv""" , A__ ):
            run_ta_mlm_flax.main()
            A__ : str = get_results(A__ )
            self.assertGreaterEqual(result["""eval_accuracy"""] , 0.4_2 )
    # Token classification / NER (run_flax_ner).
    @slow
    def __A ( self ):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        A__ : Any = 7 if get_gpu_count() > 1 else 2
        A__ : List[Any] = self.get_auto_remove_tmp_dir()
        A__ : int = F"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            """.split()
        with patch.object(A__ , """argv""" , A__ ):
            run_flax_ner.main()
            A__ : Union[str, Any] = get_results(A__ )
            self.assertGreaterEqual(result["""eval_accuracy"""] , 0.7_5 )
            self.assertGreaterEqual(result["""eval_f1"""] , 0.3 )
    # Question answering on SQuAD v2 (run_qa).
    @slow
    def __A ( self ):
        A__ : Optional[Any] = self.get_auto_remove_tmp_dir()
        A__ : Tuple = F"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            """.split()
        with patch.object(A__ , """argv""" , A__ ):
            run_qa.main()
            A__ : Tuple = get_results(A__ )
            self.assertGreaterEqual(result["""eval_f1"""] , 30 )
            self.assertGreaterEqual(result["""eval_exact"""] , 30 )
| 718 |
def UpperCamelCase (lowercase_: str , lowercase_: str ) -> bool:
    """Dynamic-programming check that one string can be abbreviated to the
    other (match lowercase letters by uppercasing or deleting them).

    NOTE(review): duplicated ``lowercase_`` parameters (SyntaxError) —
    machine-mangled; presumably ``(a, b)``.  The names ``a``, ``b``, ``n``,
    ``m`` and ``dp`` below are otherwise undefined, and the DP-cell updates
    were reduced to throwaway ``A__`` assignments — presumably
    ``dp[i + 1][j + 1] = True`` / ``dp[i + 1][j] = True``; confirm against
    the original source.
    """
    A__ : Union[str, Any] = len(lowercase_ )
    A__ : List[Any] = len(lowercase_ )
    A__ : List[Any] = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    A__ : str = True
    for i in range(lowercase_ ):
        for j in range(m + 1 ):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    A__ : int = True
                if a[i].islower():
                    A__ : Dict = True
    return dp[n][m]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 64 | 0 |
from functools import lru_cache
@lru_cache
def UpperCamelCase (lowercase_: int ) -> int:
    """Return the factorial of ``lowercase_``, memoized via ``lru_cache``.

    Raises:
        ValueError: if the input is negative.
    """
    if lowercase_ < 0:
        raise ValueError("""Number should not be negative.""" )
    # BUG FIX: the original referenced undefined names ``num`` and
    # ``factorial``; use the actual parameter and recurse on this function.
    return 1 if lowercase_ in (0, 1) else lowercase_ * UpperCamelCase(lowercase_ - 1 )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 719 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
# Shared RNG for generating deterministic-enough test inputs.
A_ : Dict = random.Random()
if is_torch_available():
    import torch
def UpperCamelCase (lowercase_: Tuple , lowercase_=1.0 , lowercase_=None , lowercase_=None ) -> str:
    """Build a nested list of random floats with the requested 2-D shape.

    NOTE(review): duplicated ``lowercase_`` parameters (SyntaxError) —
    machine-mangled; presumably ``(shape, scale, rng, name)``.  ``rng``,
    ``shape``, ``scale`` and ``values`` below are otherwise undefined —
    confirm against the original source.
    """
    if rng is None:
        A__ : Optional[Any] = global_rng
    A__ : List[str] = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class _a (unittest.TestCase ):
    """Fixture holder for ``ASTFeatureExtractor`` tests: stores the extractor
    config and generates variable-length synthetic speech inputs.

    NOTE(review): ``__init__`` declares duplicated ``A__`` parameters (a
    SyntaxError) — machine-mangled; the attribute assignments indicate the
    intended parameter names.
    """

    def __init__( self , A__ , A__=7 , A__=400 , A__=2000 , A__=1 , A__=0.0 , A__=1_6000 , A__=True , A__=True , ):
        A__ : Any = parent
        A__ : Optional[int] = batch_size
        A__ : Union[str, Any] = min_seq_length
        A__ : Dict = max_seq_length
        # Step between consecutive sample lengths so the batch spans the
        # min..max range evenly.
        A__ : Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        A__ : str = feature_size
        A__ : Optional[int] = padding_value
        A__ : List[str] = sampling_rate
        A__ : List[str] = return_attention_mask
        A__ : int = do_normalize
    # kwargs dict forwarded to the feature extractor under test.
    def __A ( self ):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    # Generate a batch of float lists; equal_length fixes all lengths to the
    # maximum, numpify converts each sample to an ndarray.
    def __A ( self , A__=False , A__=False ):
        def _flatten(A__ ):
            return list(itertools.chain(*A__ ) )
        if equal_length:
            A__ : Dict = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            A__ : Union[str, Any] = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            A__ : Optional[int] = [np.asarray(A__ ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class _a(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    """Test suite for the Audio Spectrogram Transformer (AST) feature extractor."""

    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        # NOTE(review): `ASTFeatureExtractionTester` must resolve to the tester class
        # defined earlier in this file — confirm the binding, since both classes were
        # obfuscated to the same name `_a`.
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [UpperCamelCase((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched — python lists and numpy arrays must encode identically.
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [UpperCamelCase((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            # Double-precision input must be down-cast to float32 on output.
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        """Load `num_samples` decoded audio arrays from the dummy LibriSpeech dataset."""
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 64 | 0 |
import d4rl  # noqa: F401 -- importing registers the offline-RL environments with gym
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline


# Planner/sampling hyper-parameters for the value-guided diffusion policy.
config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}


if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
| 720 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _a(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """GPT-2 LM head driven by a projected "prefix" embedding.

    A prefix vector (e.g. a CLIP embedding) is optionally bottlenecked through the
    ``encode_prefix``/``decode_prefix`` linear layers, concatenated in front of the
    token embeddings, and fed to a GPT-2 LM head for training (``forward``) or
    beam-search caption generation (``generate_captions``).
    """

    # GPT-2 re-creates its attention bias buffers at load time, so stale copies in
    # a checkpoint should not raise.
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        # A hidden bottleneck is mandatory when the prefix width differs from the
        # GPT-2 embedding width, since identity projections could not line up.
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPTaConfig(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPTaLMHeadModel(gpt_config)

    def forward(self, input_ids, prefix_embeds, attention_mask=None, labels=None):
        """Teacher-forcing forward pass.

        Returns the GPT-2 LM output; when a prefix bottleneck is configured, the
        intermediate hidden prefix is returned as well.
        """
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            # Prepend dummy (zero) label positions so targets align with the prefix.
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size, device):
        """Zero token ids of shape (batch_size, prefix_length) covering the prefix positions."""
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        """Project a raw prefix into the (optional) hidden bottleneck space."""
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        """Beam-search one caption per feature; returns stacked best tokens and their lengths."""
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_embeds=None,
        device=None,
        prompt=None,  # unused; kept for signature parity with the original — TODO confirm
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        """Beam search over the GPT-2 head, returning tokens and lengths sorted best-first."""
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(tokens)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                # First step: fan the single hypothesis out into `beam_size` beams.
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                # Finished beams contribute a single neutral continuation (log-prob 0 on token 0).
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                # Pick the best `beam_size` (beam, token) continuations by length-normalised score.
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
| 64 | 0 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _a(AbstractDatasetReader):
    """Reader that builds a `Dataset` (or streaming `IterableDataset`) from JSON/JSON-Lines files."""

    def __init__(
        self,
        path_or_paths,
        split=None,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        field=None,
        num_proc=None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        # Normalize a bare path (or list of paths) into a {split: paths} mapping.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        """Materialize the dataset (streaming or cached map-style, per ``self.streaming``)."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class _a :
'''simple docstring'''
def __init__( self , A__ , A__ , A__ = None , A__ = None , **A__ , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" )
A__ : int = dataset
A__ : Any = path_or_buf
A__ : Union[str, Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
A__ : str = num_proc
A__ : Union[str, Any] = """utf-8"""
A__ : List[Any] = to_json_kwargs
def __A ( self ):
A__ : List[Any] = self.to_json_kwargs.pop("""path_or_buf""" , A__ )
A__ : List[Any] = self.to_json_kwargs.pop("""orient""" , """records""" )
A__ : Optional[Any] = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False )
A__ : Optional[int] = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True )
A__ : int = self.to_json_kwargs.pop("""compression""" , A__ )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F"""`datasets` currently does not support {compression} compression""" )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , """wb""" , compression=A__ ) as buffer:
A__ : Optional[Any] = self._write(file_obj=A__ , orient=A__ , lines=A__ , index=A__ , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
""" was passed. Please provide a local path instead.""" )
A__ : int = self._write(
file_obj=self.path_or_buf , orient=A__ , lines=A__ , index=A__ , **self.to_json_kwargs )
return written
def __A ( self , A__ ):
A__ : int = args
A__ : Union[str, Any] = query_table(
table=self.dataset.data , key=slice(A__ , offset + self.batch_size ) , indices=self.dataset._indices , )
A__ : List[Any] = batch.to_pandas().to_json(
path_or_buf=A__ , orient=A__ , lines=A__ , index=A__ , **A__ )
if not json_str.endswith("""\n""" ):
json_str += "\n"
return json_str.encode(self.encoding )
def __A ( self , A__ , A__ , A__ , A__ , **A__ , ):
A__ : List[str] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
A__ : Optional[int] = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(A__ )
else:
A__ : Optional[int] = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , A__ , A__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
written += file_obj.write(A__ )
return written
| 721 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
# Module-level logger: the builder below refers to it as `logger`; the original
# (obfuscated) binding `A_` is kept as a backward-compatible alias.
logger = datasets.utils.logging.get_logger(__name__)
A_ : Tuple = logger
@dataclass
class _a(datasets.BuilderConfig):
    """BuilderConfig for the JSON loader."""

    features: Optional[datasets.Features] = None  # optional schema to cast the parsed table to
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None  # read only this top-level field of a single JSON document
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


# Descriptive alias used by the builder class below.
JsonConfig = _a
class _a(datasets.ArrowBasedBuilder):
    """Arrow-based builder that parses JSON and JSON-Lines files into `pa.Table`s."""

    # NOTE(review): `JsonConfig` must be bound to the config dataclass defined above.
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        """Validate deprecated config options and expose the (optional) feature schema."""
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """One TRAIN split for bare paths, otherwise one split per key of `data_files`."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table):
        """Add missing columns as nulls and cast the table to the configured schema."""
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                field_type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=field_type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield (key, pa.Table) pairs from each file, growing the parse block size on demand."""
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break

                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)

                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")

                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            # Fall back to parsing the whole file with the json module.
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
| 64 | 0 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a TensorFlow LXMERT checkpoint into a PyTorch state dict on disk.

    Args:
        tf_checkpoint_path: Path to the TensorFlow checkpoint.
        config_file: JSON file describing the pre-trained model architecture.
        pytorch_dump_path: Destination path for the converted PyTorch weights.
    """
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 700 |
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

# Collect every tracked source path once; the checks below are simple filters.
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

# Exit with the total number of offending files so CI fails when any exist.
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 64 | 0 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class _a :
'''simple docstring'''
UpperCAmelCase__: List[Any] = None
def __A ( self ):
A__ : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
A__ : Optional[Any] = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , A__ )
def __A ( self ):
A__ : int = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : str = os.path.join(A__ , """feat_extract.json""" )
feat_extract_first.to_json_file(A__ )
A__ : Optional[Any] = self.feature_extraction_class.from_json_file(A__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __A ( self ):
A__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Optional[Any] = feat_extract_first.save_pretrained(A__ )[0]
check_json_file_has_correct_format(A__ )
A__ : Optional[Any] = self.feature_extraction_class.from_pretrained(A__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __A ( self ):
A__ : int = self.feature_extraction_class()
self.assertIsNotNone(A__ )
| 701 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def UpperCamelCase(
    *args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2
):
    """Warn about (and harvest) deprecated arguments/attributes.

    Each positional argument is a tuple ``(attribute, version_name, message)``.
    When ``take_from`` is a dict, deprecated keys are popped and returned; when it
    is an object, the matching attribute value is returned instead.

    Returns:
        ``None``, a single harvested value, or a tuple of values.

    Raises:
        ValueError: if the library version is already >= ``version_name``
            (the deprecation shim should have been removed).
        TypeError: if ``take_from`` is a dict that still holds unknown keys.
    """
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    # A single bare (attribute, version, message) triple is also accepted.
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    # Any keys left in a dict `take_from` were not declared deprecated: report
    # them against the caller's frame so the error points at the real call site.
    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
| 64 | 0 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Path of the tiny SentencePiece model stored in the test fixtures; used to
# build small tokenizers without downloading a real checkpoint.
A_ : List[Any] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class _a(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for XGLM (slow SentencePiece and fast tokenizers)."""

    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    # presumably enables the mixin's sentencepiece-specific checks — TODO confirm
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(A_, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)

    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(A_, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # Characters outside the tiny fixture vocabulary come back as "<unk>".
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_disk(self):
        # A tokenizer built from a temp file must survive pickling after the file is gone.
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(A_, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
            pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735]
        # fmt: on

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
            "input_ids": [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
            "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],
        }  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
# | 702 |  — dataset chunk-separator artifact (not Python); commented out so the file can parse
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Rename one flax weight key/tensor pair to the PyTorch naming/layout scheme.

    The mangled version declared two parameters both named ``lowercase_`` (a
    SyntaxError) while the body read ``flax_key_tuple``/``flax_tensor``; the
    intended names — which the call site in the sharding loop also uses — are
    restored. Returns the (possibly renamed) key tuple and tensor.
    """
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer: (experts, in, out) -> (experts, out, in)
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer: transpose to the torch (out, in) convention
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        # layer norms / embeddings keep their layout, only the name changes
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    """Split a flattened T5X checkpoint entry into (real layer name, sub-key, content).

    The mangled version declared three parameters all named ``lowercase_`` (a
    SyntaxError); the names are restored from the body's reads and from the
    call site in ``shard_on_the_fly``.

    layer: flattened key, e.g. ``"encoder/.../kvstore/path"``.
    checkpoint_info: the flattened msgpack checkpoint dict.
    switch_checkpoint_path: root path, prepended to kvstore file paths.
    """
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        # make the tensorstore file path absolute relative to the checkpoint root
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    """Rename a block of weights to the PyTorch scheme and torch.save it.

    The mangled version declared two parameters both named ``lowercase_`` (a
    SyntaxError) and lost the dict-rebuild assignment target. Keys are joined
    with "/" by the sharding loop; state dicts use "." — presumably the lost
    subscript converted them (restored below; confirm against upstream).
    """
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, save_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    """Convert a T5X switch-transformers checkpoint into sharded PyTorch weight files.

    Streams one tensorstore-backed weight at a time (these checkpoints are too
    large to hold in memory), renames keys to the PyTorch scheme, and starts a
    new shard whenever the running byte size would exceed ``max_shard_size``.
    The mangled version declared duplicate ``lowercase_`` parameters (a
    SyntaxError) and destroyed every assignment target; names are restored from
    the body's reads and the ``__main__`` call site.

    Returns (metadata, index); index is None when a single shard suffices.
    """
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(save_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    # Regroup the flat tensorstore spec entries per real layer name.
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path_ = os.path.join(
                save_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path_)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path_ = os.path.join(save_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path_)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"
        )
        # shards were saved with a "???" placeholder before the total was known
        temp_filename = os.path.join(save_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(save_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(save_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    # CLI entry point: convert and shard a switch-transformers T5X checkpoint.
    # The mangled version bound the parser and parsed args to ``A_`` while the
    # following lines read ``parser``/``args`` (NameError), and the attribute
    # read ``switch_tax_checkpoint_path`` did not match the declared option
    # ``--switch_t5x_checkpoint_path``; both are restored here.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--switch_t5x_checkpoint_path',
        default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
        type=str,
        required=False,
        help='Path to a directory containing a folder per layer. Follows the original Google format.',
    )
    parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
    parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
        type=str,
        required=False,
        help='Path to the output pytorch model.',
    )
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def sanity_check() -> None:
    """Manual smoke test: load a converted switch-base-8 model and generate once.

    The mangled version imported ``TaTokenizer`` (a digit-mangled
    ``T5Tokenizer``, which does not exist and would raise ImportError),
    annotated the return as ``int`` despite returning nothing, and lost the
    local bindings read by the later lines; all are restored here.
    """
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
# | 64 | 0 |  — dataset chunk-separator artifact (not Python); commented out so the file can parse
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _a (PipelineTool):
    """Agent tool answering a natural-language question about a document image (DocVQA).

    Wraps the Donut checkpoint fine-tuned on DocVQA; the PipelineTool base
    class drives encode -> forward -> decode. The mangled version named every
    class attribute ``UpperCAmelCase__`` and every method ``__A`` (so later
    bindings shadowed earlier ones), inherited from the undefined
    ``__magic_name__`` instead of the imported ``PipelineTool``, read locals
    (``task_prompt``, ``sequence``) that were never assigned, and called the
    nonexistent ``tokenajson`` (digit-mangled ``token2json``). All are restored.
    """

    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        """Refuse to instantiate without Pillow, required to handle the input image."""
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document, question):
        """Build Donut decoder prompt ids and pixel values from the document and question."""
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        """Greedy-generate the answer token sequence from the encoded inputs."""
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        """Strip special tokens and the task prompt, then parse the answer string."""
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
# | 703 |  — dataset chunk-separator artifact (not Python); commented out so the file can parse
# Lazy-import module init for the BARTpho tokenizer. The mangled version bound
# the import structure, the tokenizer registration, and the _LazyModule
# instance all to throwaway ``A_`` names, so ``_import_structure`` on the last
# line was undefined and the lazy module was never installed; restored below.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available

# Maps submodule name -> list of public names it provides (filled in lazily).
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # sentencepiece missing: expose nothing rather than fail at import time
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
# | 64 | 0 |  — dataset chunk-separator artifact (not Python); commented out so the file can parse
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
# The test class below filters model mappings against this set under the name
# ``_TO_SKIP``; the mangled ``A_`` binding left that name undefined, so the set
# is bound to ``_TO_SKIP`` here (with the old alias kept for compatibility).
_TO_SKIP = {'LayoutLMv2Config', 'LayoutLMv3Config'}
A_ : Any = _TO_SKIP
@is_pipeline_test
class _a (unittest.TestCase ):
    """Pipeline tests for the zero-shot-classification task.

    NOTE(review): identifiers in this block look machine-mangled. Locals are
    bound to the throwaway name ``A__`` while later statements read names that
    are never assigned here (``model_mapping``, ``tf_model_mapping``,
    ``classifier``, ``outputs``, ``zero_shot_classifier``), and
    ``config.labelaid`` is presumably a digit-mangled ``config.label2id`` —
    confirm against the upstream test file. Code left byte-identical; only
    comments/docstrings added.
    """

    # Model mappings under test; configs named in _TO_SKIP take non-text inputs
    # and are filtered out below.
    UpperCAmelCase__: Optional[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    UpperCAmelCase__: List[str] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        UpperCAmelCase__: List[Any] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        UpperCAmelCase__: Dict = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    # Builds the pipeline under test plus two example inputs.
    # NOTE(review): the return reads ``classifier`` but the pipeline was bound to ``A__``.
    def __A ( self , A__ , A__ , A__ ):
        A__ : List[Any] = ZeroShotClassificationPipeline(
            model=A__ , tokenizer=A__ , candidate_labels=["""polics""", """health"""] )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]

    # Exercises single/multiple candidate labels, templates, batching and error cases.
    def __A ( self , A__ , A__ ):
        A__ : Union[str, Any] = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics""" )
        self.assertEqual(A__ , {"""sequence""": ANY(A__ ), """labels""": [ANY(A__ )], """scores""": [ANY(A__ )]} )
        # No kwarg
        A__ : Tuple = classifier("""Who are you voting for in 2020?""" , ["""politics"""] )
        self.assertEqual(A__ , {"""sequence""": ANY(A__ ), """labels""": [ANY(A__ )], """scores""": [ANY(A__ )]} )
        A__ : Optional[Any] = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics"""] )
        self.assertEqual(A__ , {"""sequence""": ANY(A__ ), """labels""": [ANY(A__ )], """scores""": [ANY(A__ )]} )
        A__ : List[Any] = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics, public health""" )
        self.assertEqual(
            A__ , {"""sequence""": ANY(A__ ), """labels""": [ANY(A__ ), ANY(A__ )], """scores""": [ANY(A__ ), ANY(A__ )]} )
        # scores over candidate labels should sum to 1 in single-label mode
        # NOTE(review): ``outputs`` below is never assigned in this scope
        self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )
        A__ : Optional[int] = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health"""] )
        self.assertEqual(
            A__ , {"""sequence""": ANY(A__ ), """labels""": [ANY(A__ ), ANY(A__ )], """scores""": [ANY(A__ ), ANY(A__ )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )
        A__ : List[Any] = classifier(
            """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""This text is about {}""" )
        self.assertEqual(A__ , {"""sequence""": ANY(A__ ), """labels""": [ANY(A__ )], """scores""": [ANY(A__ )]} )
        # https://github.com/huggingface/transformers/issues/13846
        A__ : Tuple = classifier(["""I am happy"""] , ["""positive""", """negative"""] )
        self.assertEqual(
            A__ , [
                {"""sequence""": ANY(A__ ), """labels""": [ANY(A__ ), ANY(A__ )], """scores""": [ANY(A__ ), ANY(A__ )]}
                for i in range(1 )
            ] , )
        A__ : List[str] = classifier(["""I am happy""", """I am sad"""] , ["""positive""", """negative"""] )
        self.assertEqual(
            A__ , [
                {"""sequence""": ANY(A__ ), """labels""": [ANY(A__ ), ANY(A__ )], """scores""": [ANY(A__ ), ANY(A__ )]}
                for i in range(2 )
            ] , )
        # invalid inputs must raise
        with self.assertRaises(A__ ):
            classifier("""""" , candidate_labels="""politics""" )
        with self.assertRaises(A__ ):
            classifier(A__ , candidate_labels="""politics""" )
        with self.assertRaises(A__ ):
            classifier("""Who are you voting for in 2020?""" , candidate_labels="""""" )
        with self.assertRaises(A__ ):
            classifier("""Who are you voting for in 2020?""" , candidate_labels=A__ )
        with self.assertRaises(A__ ):
            classifier(
                """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""Not formatting template""" , )
        with self.assertRaises(A__ ):
            classifier(
                """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template=A__ , )
        self.run_entailment_id(A__ )

    # Checks entailment_id resolution for several label2id layouts, then restores the original.
    # NOTE(review): the parameter is mangled to ``A__`` while the body reads
    # ``zero_shot_classifier``; ``config.labelaid`` is presumably ``config.label2id``.
    def __A ( self , A__ ):
        A__ : str = zero_shot_classifier.model.config
        A__ : Any = config.labelaid
        A__ : int = zero_shot_classifier.entailment_id
        A__ : List[str] = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1 )
        A__ : Optional[Any] = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        A__ : int = {"""ENTAIL""": 0, """NON-ENTAIL""": 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        A__ : List[str] = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2 )
        A__ : Union[str, Any] = original_labelaid
        self.assertEqual(A__ , zero_shot_classifier.entailment_id )

    # Regression test: very long truncated inputs must not crash (torch).
    @require_torch
    def __A ( self ):
        A__ : Optional[int] = pipeline(
            """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            """Who are you voting for in 2020?""" * 100 , candidate_labels=["""politics""", """public health""", """science"""] )

    # Small-model smoke test with pinned (uniform) scores, PyTorch backend.
    @require_torch
    def __A ( self ):
        A__ : Dict = pipeline(
            """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
        A__ : List[str] = zero_shot_classifier(
            """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
        self.assertEqual(
            nested_simplify(A__ ) , {
                """sequence""": """Who are you voting for in 2020?""",
                """labels""": ["""science""", """public health""", """politics"""],
                """scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3],
            } , )

    # Small-model smoke test, TensorFlow backend.
    @require_tf
    def __A ( self ):
        A__ : Any = pipeline(
            """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""tf""" , )
        A__ : Any = zero_shot_classifier(
            """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
        self.assertEqual(
            nested_simplify(A__ ) , {
                """sequence""": """Who are you voting for in 2020?""",
                """labels""": ["""science""", """public health""", """politics"""],
                """scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3],
            } , )

    # Full-size roberta-large-mnli checks with pinned scores, PyTorch backend.
    @slow
    @require_torch
    def __A ( self ):
        A__ : List[str] = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""pt""" )
        A__ : Dict = zero_shot_classifier(
            """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
        self.assertEqual(
            nested_simplify(A__ ) , {
                """sequence""": """Who are you voting for in 2020?""",
                """labels""": ["""politics""", """public health""", """science"""],
                """scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9],
            } , )
        A__ : Tuple = zero_shot_classifier(
            """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
            """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
            """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
            """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
            """ machine translation tasks show these models to be superior in quality while being more parallelizable"""
            """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
            """ English-to-German translation task, improving over the existing best results, including ensembles by"""
            """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
            """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
            """ fraction of the training costs of the best models from the literature. We show that the Transformer"""
            """ generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
            """ large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A__ , )
        self.assertEqual(
            nested_simplify(A__ ) , {
                """sequence""": (
                    """The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
                    """ networks in an encoder-decoder configuration. The best performing models also connect the"""
                    """ encoder and decoder through an attention mechanism. We propose a new simple network"""
                    """ architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
                    """ and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
                    """ superior in quality while being more parallelizable and requiring significantly less time to"""
                    """ train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
                    """ improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
                    """ English-to-French translation task, our model establishes a new single-model state-of-the-art"""
                    """ BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
                    """ costs of the best models from the literature. We show that the Transformer generalizes well to"""
                    """ other tasks by applying it successfully to English constituency parsing both with large and"""
                    """ limited training data."""
                ),
                """labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
                """scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
            } , )

    # Full-size roberta-large-mnli checks with pinned scores, TensorFlow backend.
    @slow
    @require_tf
    def __A ( self ):
        A__ : Optional[Any] = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""tf""" )
        A__ : Tuple = zero_shot_classifier(
            """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
        self.assertEqual(
            nested_simplify(A__ ) , {
                """sequence""": """Who are you voting for in 2020?""",
                """labels""": ["""politics""", """public health""", """science"""],
                """scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9],
            } , )
        A__ : Any = zero_shot_classifier(
            """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
            """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
            """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
            """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
            """ machine translation tasks show these models to be superior in quality while being more parallelizable"""
            """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
            """ English-to-German translation task, improving over the existing best results, including ensembles by"""
            """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
            """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
            """ fraction of the training costs of the best models from the literature. We show that the Transformer"""
            """ generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
            """ large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A__ , )
        self.assertEqual(
            nested_simplify(A__ ) , {
                """sequence""": (
                    """The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
                    """ networks in an encoder-decoder configuration. The best performing models also connect the"""
                    """ encoder and decoder through an attention mechanism. We propose a new simple network"""
                    """ architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
                    """ and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
                    """ superior in quality while being more parallelizable and requiring significantly less time to"""
                    """ train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
                    """ improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
                    """ English-to-French translation task, our model establishes a new single-model state-of-the-art"""
                    """ BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
                    """ costs of the best models from the literature. We show that the Transformer generalizes well to"""
                    """ other tasks by applying it successfully to English constituency parsing both with large and"""
                    """ limited training data."""
                ),
                """labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
                """scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
            } , )
# | 704 |  — dataset chunk-separator artifact (not Python); commented out so the file can parse
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
# Whisper checkpoint name -> download URL. The second-to-last URL path segment
# is the file's SHA256, checked by the download helper. The conversion entry
# point reads this table under the name ``_MODELS``; the mangled ``A_`` binding
# left that name undefined, so it is restored (old alias kept for compatibility).
_MODELS = {
    'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
    'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
    'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
    'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
    'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
    'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
    'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
    'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
    'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
    'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
A_ : Dict = _MODELS
def remove_ignore_keys_(state_dict):
    """Drop top-level bookkeeping entries ("layers", "blocks") from state_dict in place.

    The mangled version named the parameter ``lowercase_`` while the body read
    ``state_dict`` and passed mangled arguments to ``pop``; restored so each
    ignore key is popped with a ``None`` default (missing keys are tolerated).
    """
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
# Substring renames from the OpenAI Whisper state-dict layout to the HF one.
# ``rename_keys`` below reads this table under the name ``WHISPER_MAPPING``;
# the mangled ``A_`` binding left that name undefined, so it is restored
# (old alias kept for compatibility).
WHISPER_MAPPING = {
    'blocks': 'layers',
    'mlp.0': 'fc1',
    'mlp.2': 'fc2',
    'mlp_ln': 'final_layer_norm',
    '.attn.query': '.self_attn.q_proj',
    '.attn.key': '.self_attn.k_proj',
    '.attn.value': '.self_attn.v_proj',
    '.attn_ln': '.self_attn_layer_norm',
    '.attn.out': '.self_attn.out_proj',
    '.cross_attn.query': '.encoder_attn.q_proj',
    '.cross_attn.key': '.encoder_attn.k_proj',
    '.cross_attn.value': '.encoder_attn.v_proj',
    '.cross_attn_ln': '.encoder_attn_layer_norm',
    '.cross_attn.out': '.encoder_attn.out_proj',
    'decoder.ln.': 'decoder.layer_norm.',
    'encoder.ln.': 'encoder.layer_norm.',
    'token_embedding': 'embed_tokens',
    'encoder.positional_embedding': 'encoder.embed_positions.weight',
    'decoder.positional_embedding': 'decoder.embed_positions.weight',
    'ln_post': 'layer_norm',
}
A_ : Any = WHISPER_MAPPING
def rename_keys(s_dict):
    """Rewrite every key of s_dict in place using the WHISPER_MAPPING substring table.

    The mangled version named the parameter ``lowercase_`` while the body read
    ``s_dict``, and both the ``replace`` arguments and the re-insertion target
    were destroyed; restored so each matched substring is replaced and the
    value is moved under the new key. Returns the (mutated) dict.
    """
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"""{key} -> {new_key}""")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    """Build a bias-free Linear layer sharing its weight with the given embedding.

    Used to tie the output projection to the decoder token embedding. The
    mangled version named the parameter ``lowercase_`` while the body read
    ``emb``, and passed mangled values to ``nn.Linear``; restored here.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # share the embedding's weight tensor (replaces the Linear's own data wholesale)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = ".") -> bytes:
    """Download a checkpoint to ``root`` with SHA256 verification and caching.

    The expected SHA256 is the second-to-last segment of the URL path. A cached
    file whose hash matches is returned without re-downloading. The mangled
    version declared two parameters both named ``lowercase_`` (a SyntaxError)
    and called the nonexistent ``hashlib.shaaaa`` (digit-mangled ``sha256``);
    both are fixed. ``root`` defaults to the current directory because the
    conversion entry point calls this with the URL only.

    Raises RuntimeError if the target path is not a regular file or the
    downloaded bytes fail verification.
    """
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"""{download_target} exists and is not a regular file""")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("""Content-Length""")), ncols=80, unit="""iB""", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            """Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.""")
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Convert an OpenAI Whisper checkpoint into a HF WhisperForConditionalGeneration.

    checkpoint_path: a key of ``_MODELS`` (downloaded on demand) or a local
    ``.pt`` file path. pytorch_dump_folder_path: output dir for save_pretrained.
    The mangled version declared duplicate ``lowercase_`` parameters (a
    SyntaxError) and destroyed every assignment target; names are restored
    from the body's reads and the ``__main__`` call site.
    """
    if ".pt" not in checkpoint_path:
        # NOTE(review): _download returns raw bytes; this path may need a
        # torch.load(io.BytesIO(...)) wrapper — confirm against upstream.
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        # head count, not hidden size: the mangled source read "n_text_state" here
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}")

    if tie_embeds:
        # tie output projection to the decoder token embedding
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: convert one Whisper checkpoint to the HF format.
    # The mangled version bound the parser and parsed args to ``A_`` while the
    # following lines read ``parser``/``args`` (NameError); restored here.
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Path to the downloaded checkpoints')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
# | 64 | 0 |  — dataset chunk-separator artifact (not Python); commented out so the file can parse
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

# File names the tokenizer expects inside a checkpoint directory.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

# Download locations of the vocabulary for each published RoFormer checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
        'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
        'junnyu/roformer_chinese_char_small': (
            'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
        ),
        'junnyu/roformer_chinese_char_base': (
            'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
        ),
        'junnyu/roformer_small_discriminator': (
            'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
        ),
        'junnyu/roformer_small_generator': (
            'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
        ),
    }
}

# Maximum model input length (in tokens) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'junnyu/roformer_chinese_small': 1536,
    'junnyu/roformer_chinese_base': 1536,
    'junnyu/roformer_chinese_char_small': 512,
    'junnyu/roformer_chinese_char_base': 512,
    'junnyu/roformer_small_discriminator': 128,
    'junnyu/roformer_small_generator': 128,
}

# Per-checkpoint tokenizer construction defaults.
PRETRAINED_INIT_CONFIGURATION = {
    'junnyu/roformer_chinese_small': {'do_lower_case': True},
    'junnyu/roformer_chinese_base': {'do_lower_case': True},
    'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
    'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
    'junnyu/roformer_small_discriminator': {'do_lower_case': True},
    'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    """
    Fast RoFormer tokenizer backed by HuggingFace's `tokenizers` library.

    Behaves like a BERT WordPiece tokenizer, except that Chinese text is first
    segmented with jieba through the custom `JiebaPreTokenizer`.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Keep the backend normalizer in sync with the requested casing options.
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        # The jieba-based custom pre-tokenizer is not picklable: swap in a
        # standard BERT pre-tokenizer before serializing.
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # Restore the custom jieba pre-tokenizer after unpickling.
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs as `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Return token-type ids: 0 for the first sequence, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the vocabulary files to `save_directory` and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        # The custom pre-tokenizer cannot be serialized; use a standard one for saving.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
| 705 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast (CPU, tiny-model) tests for `TextToVideoSDPipeline`."""

    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            'num_inference_steps',
            'generator',
            'latents',
            'return_dict',
            'callback',
            'callback_steps',
        ]
    )

    def get_dummy_components(self):
        """Build tiny, deterministic pipeline components (seeded per model)."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Standard call kwargs with a device-appropriate seeded generator."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    """Slow GPU tests against reference outputs of the full pretrained model."""

    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy")
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy")
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
| 64 | 0 |
def UpperCamelCase(lowercase_: int = 1000000) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= `lowercase_`.

    (Project Euler 72: the number of reduced proper fractions with
    denominator <= limit.)

    Sieves the primes up to and including the limit, then applies
    phi(n) = n * prod(1 - 1/p) over the prime divisors p of n.
    """
    limit = lowercase_
    # Sieve of Eratosthenes over the odd numbers; include `limit` itself so a
    # prime limit contributes the correct phi(limit) = limit - 1.
    primes = set(range(3, limit + 1, 2))
    primes.add(2)
    for p in range(3, limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(range(p * p, limit + 1, p))

    # phi[n] starts at n and is scaled by (1 - 1/p) for each prime p | n.
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f'''{UpperCamelCase() = }''')
| 706 |
def UpperCamelCase(lowercase_: int) -> int:
    """Return the 1-based position of the most significant set bit of
    `lowercase_` (i.e. its bit length); 0 for an input of 0.

    Raises:
        TypeError: if the input is not an int.
        ValueError: if the input is negative (right-shifting a negative
            number never reaches 0, so it has no MSB position).
    """
    if not isinstance(lowercase_, int):
        raise TypeError("Input value must be an 'int' type")
    if lowercase_ < 0:
        raise ValueError("Input value must be non-negative")
    position = 0
    number = lowercase_
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 64 | 0 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for XLM's BPE tokenizer against a tiny toy vocabulary."""

    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Tokenization and token->id conversion on the toy vocabulary."""
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
| 707 |
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def UpperCamelCase(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Warp `img` with the affine transform mapping the three points `pt1`
    onto the three points `pt2`, producing a (rows, cols) output image."""
    matrix = cva.getAffineTransform(pt1, pt2)
    return cva.warpAffine(img, matrix, (rows, cols))
if __name__ == "__main__":
    # read original image
    image = cva.imread(
        str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
    )
    # turn image in gray scale value
    gray_img = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    # NOTE(review): the source/destination pairings below are assumed from the
    # four point sets defined above — confirm against the original script.
    images = [
        gray_img,
        UpperCamelCase(gray_img, pts1, pts2, img_rows, img_cols),
        UpperCamelCase(gray_img, pts2, pts3, img_rows, img_cols),
        UpperCamelCase(gray_img, pts2, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
        plt.title(titles[i])
        plt.axis('off')
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 64 | 0 |
def UpperCamelCase(lowercase_: int) -> int:
    """Return the 1-based position of the most significant set bit of
    `lowercase_` (i.e. its bit length); 0 for an input of 0.

    Raises:
        TypeError: if the input is not an int.
        ValueError: if the input is negative (right-shifting a negative
            number never reaches 0, so it has no MSB position).
    """
    if not isinstance(lowercase_, int):
        raise TypeError("Input value must be an 'int' type")
    if lowercase_ < 0:
        raise ValueError("Input value must be non-negative")
    position = 0
    number = lowercase_
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 708 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class PyTorchBenchmarkTest(unittest.TestCase):
    """Smoke tests for `PyTorchBenchmark`: each test benchmarks a tiny model
    and checks that the timing/memory result dicts are populated."""

    def check_results_dict_not_empty(self, results):
        # Every (batch_size, sequence_length) combination must have a result.
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            torchscript=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            fp16=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            fp16=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory_line_by_line(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 64 | 0 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def UpperCamelCase(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    """Merge a LoRA `.safetensors` checkpoint into a Stable Diffusion pipeline.

    Args:
        base_model_path: diffusers-format base model to load.
        checkpoint_path: path to the LoRA `.safetensors` file.
        LORA_PREFIX_UNET: key prefix used for UNet weights in the checkpoint.
        LORA_PREFIX_TEXT_ENCODER: key prefix used for text-encoder weights.
        alpha: merging ratio, W = W0 + alpha * (up @ down).

    Returns:
        The pipeline with the LoRA deltas folded into its weights.
    """
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer by walking attribute names, re-joining pieces
        # that were split on "_" when the attribute lookup fails
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        # pair the lora_up / lora_down keys, up first
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            # conv weights: drop the trailing 1x1 spatial dims for the matmul
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
if __name__ == "__main__":
    # CLI entry point: merge a LoRA checkpoint into a base model and save it.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.'
    )
    parser.add_argument(
        '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
    )
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument(
        '--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors'
    )
    parser.add_argument(
        '--lora_prefix_text_encoder',
        default='lora_te',
        type=str,
        help='The prefix of text encoder weight in safetensors',
    )
    parser.add_argument('--alpha', default=0.75, type=float, help='The merging ratio in W = W0 + alpha * deltaW')
    parser.add_argument(
        '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.'
    )
    parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')

    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = UpperCamelCase(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)

    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 709 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_configure(config):
    """Pytest hook: register the custom test markers used by the suite."""
    config.addinivalue_line(
        """markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" )
    config.addinivalue_line(
        """markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" )
    config.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" )
    config.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" )
    config.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" )
    config.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" )
def pytest_addoption(parser):
    """Pytest hook: forward option registration to the shared helper."""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """Pytest hook: emit report files when `--make-reports` was requested."""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("""--make-reports""")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    """Pytest hook: treat an empty test collection as success."""
    # If no tests are collected, pytest exists with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('IGNORE_RESULT')

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    """Output checker that accepts anything when IGNORE_RESULT is set."""

    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


# Install the custom doctest machinery globally.
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 64 | 0 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

# File names the tokenizer expects inside a checkpoint directory.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

# Download locations of the vocabulary/merges for each published checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
    },
    'merges_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
    },
}

# Maximum model input length (in tokens) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'allegro/herbert-base-cased': 514}

# Per-checkpoint tokenizer construction defaults (none needed).
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    """
    Fast HerBERT tokenizer backed by HuggingFace's `tokenizers` library
    (BPE with BERT-style special-token handling).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Build model inputs as `<s> A </s>` or `<s> A </s> B </s>`."""
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids: 0 for the first sequence, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the vocabulary files to `save_directory` and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 710 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _a :
    '''simple docstring'''

    # NOTE(review): this file was machine-obfuscated — every assignment target was
    # rewritten to `A__`, so later references to the original names (`parent`,
    # `input_ids`, `config`, ...) no longer resolve, and the `__init__` signature
    # repeats the parameter name `A__` (a SyntaxError). Code kept byte-for-byte;
    # only comments were added. Appears to be the Pegasus TF model tester.
    UpperCAmelCase__: List[Any] = PegasusConfig
    UpperCAmelCase__: Optional[int] = {}
    UpperCAmelCase__: List[str] = '''gelu'''

    # Stores the test hyper-parameters (batch size, hidden sizes, token ids, ...).
    def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=False , A__=99 , A__=32 , A__=2 , A__=4 , A__=37 , A__=0.1 , A__=0.1 , A__=40 , A__=2 , A__=1 , A__=0 , ):
        A__ : Dict = parent
        A__ : Dict = batch_size
        A__ : Any = seq_length
        A__ : Optional[Any] = is_training
        A__ : int = use_labels
        A__ : Any = vocab_size
        A__ : Union[str, Any] = hidden_size
        A__ : Tuple = num_hidden_layers
        A__ : Tuple = num_attention_heads
        A__ : List[Any] = intermediate_size
        A__ : Union[str, Any] = hidden_dropout_prob
        A__ : Optional[Any] = attention_probs_dropout_prob
        A__ : List[Any] = max_position_embeddings
        A__ : Any = eos_token_id
        A__ : List[Any] = pad_token_id
        A__ : List[Any] = bos_token_id

    # Builds (config, inputs_dict): random ids terminated with EOS plus a config
    # assembled from the stored hyper-parameters.
    def __A ( self ):
        A__ : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        A__ : Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        A__ : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
        A__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        A__ : Tuple = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        A__ : str = prepare_pegasus_inputs_dict(A__ , A__ , A__ )
        return config, inputs_dict

    # Checks decoder caching: logits computed with past_key_values must match
    # logits recomputed from the full sequence (compared on a random slice).
    def __A ( self , A__ , A__ ):
        A__ : int = TFPegasusModel(config=A__ ).get_decoder()
        A__ : List[Any] = inputs_dict["""input_ids"""]
        # keeps only the first batch element
        A__ : Any = input_ids[:1, :]
        A__ : Optional[Any] = inputs_dict["""attention_mask"""][:1, :]
        A__ : Optional[int] = inputs_dict["""head_mask"""]
        A__ : Any = 1
        # first forward pass
        A__ : Tuple = model(A__ , attention_mask=A__ , head_mask=A__ , use_cache=A__ )
        A__ , A__ : Dict = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        A__ : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
        A__ : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        A__ : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
        A__ : Tuple = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        A__ : Optional[Any] = model(A__ , attention_mask=A__ )[0]
        A__ : Any = model(A__ , attention_mask=A__ , past_key_values=A__ )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        A__ : int = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        A__ : Any = output_from_no_past[:, -3:, random_slice_idx]
        A__ : Tuple = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(A__ , A__ , rtol=1e-3 )
def UpperCamelCase(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    """Assemble the standard seq2seq input dict for Pegasus tests, deriving any
    mask that was not explicitly supplied.

    NOTE(review): parameter names restored from the obfuscated duplicate
    ``lowercase_`` placeholders (a SyntaxError), matching how the body
    references them. The dtype was obfuscated as ``tf.inta``; restored as
    ``tf.int8`` — TODO confirm against the upstream test.
    """
    if attention_mask is None:
        # 1 everywhere except pad positions
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                # the first (decoder-start) token is never masked
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class _a (__magic_name__ , __magic_name__ , unittest.TestCase ):
    '''simple docstring'''

    # NOTE(review): obfuscated — `__magic_name__` stands in for the original mixin
    # base classes, and `TFPegasusModelTester` referenced below no longer exists
    # under that name in this file (its class was renamed to `_a`). Code kept
    # byte-for-byte; only comments were added.
    UpperCAmelCase__: List[Any] = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    UpperCAmelCase__: Tuple = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    UpperCAmelCase__: Tuple = (
        {
            '''conversational''': TFPegasusForConditionalGeneration,
            '''feature-extraction''': TFPegasusModel,
            '''summarization''': TFPegasusForConditionalGeneration,
            '''text2text-generation''': TFPegasusForConditionalGeneration,
            '''translation''': TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    UpperCAmelCase__: int = True
    UpperCAmelCase__: Union[str, Any] = False
    UpperCAmelCase__: List[str] = False

    # Sets up the model tester and the shared config tester.
    def __A ( self ):
        A__ : Optional[Any] = TFPegasusModelTester(self )
        A__ : Tuple = ConfigTester(self , config_class=A__ )

    # Runs the shared config sanity checks.
    def __A ( self ):
        self.config_tester.run_common_tests()

    # Exercises decoder past-key-values consistency via the model tester.
    def __A ( self ):
        A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*A__ )
@require_sentencepiece
@require_tokenizers
@require_tf
class _a (unittest.TestCase ):
    '''simple docstring'''

    # Slow integration test: summarize two news articles with google/pegasus-xsum
    # (beam search, num_beams=2) and compare against the reference summaries.
    # NOTE(review): obfuscated — the assignment targets below shadow the names the
    # assertions reference (`generated_words`, `model_inputs`, ...). Kept byte-for-byte.
    UpperCAmelCase__: Optional[int] = [
        ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
        ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
    ]
    UpperCAmelCase__: Any = [
        '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
        ''' reduce the risk of wildfires.''',
        '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
    ] # differs slightly from pytorch, likely due to numerical differences in linear layers
    UpperCAmelCase__: List[str] = '''google/pegasus-xsum'''

    # Tokenizer for the reference checkpoint (cached on first access).
    @cached_property
    def __A ( self ):
        return AutoTokenizer.from_pretrained(self.model_name )

    # Model for the reference checkpoint (cached on first access).
    @cached_property
    def __A ( self ):
        A__ : int = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    # Generates summaries and asserts they equal the expected references.
    def __A ( self , **A__ ):
        A__ : str = self.translate_src_text(**A__ )
        assert self.expected_text == generated_words

    # Tokenizes the source texts, generates with beam search, and decodes.
    def __A ( self , **A__ ):
        A__ : List[str] = self.tokenizer(self.src_text , **A__ , padding=A__ , return_tensors="""tf""" )
        A__ : Optional[int] = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=A__ , )
        A__ : Dict = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=A__ )
        return generated_words

    @slow
    def __A ( self ):
        self._assert_generated_batch_equal_expected()
| 64 | 0 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def UpperCamelCase() -> argparse.Namespace:
    """Parse command-line options for the Stable Diffusion image-generation script.

    NOTE(review): the obfuscated original bound the parser to ``A__`` and then
    called ``parser.add_argument`` (NameError), and the ``type``/``default``/
    ``required`` values were collapsed to ``lowercase_``; restored here.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c",
        "--caption",
        type=str,
        default="robotic cat with wings",
        help="Text used to generate images.",
    )
    parser.add_argument(
        "-n",
        "--images_num",
        type=int,
        default=4,
        help="How much images to generate.",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=42,
        help="Seed for random process.",
    )
    parser.add_argument(
        "-ci",
        "--cuda_id",
        type=int,
        default=0,
        help="cuda_id.",
    )
    args = parser.parse_args()
    return args
def UpperCamelCase(imgs, rows, cols):
    """Paste ``rows * cols`` PIL images into one grid image and return it.

    Raises ValueError when the image count does not match rows * cols.
    NOTE(review): names restored from the obfuscated ``A__``/``lowercase_``
    placeholders (``w``/``h``/``grid`` were referenced but never bound).
    """
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs):
        # fill the grid row-major: column = i % cols, row = i // cols
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def UpperCamelCase(pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42):
    """Run the pipeline with a seeded generator and return (grid_image, images).

    NOTE(review): names restored from the obfuscated placeholders. The call to
    ``image_grid`` targets the helper defined above in this file, which the
    obfuscation renamed — confirm the helper's name when de-obfuscating.
    """
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    # arrange the images in a roughly square grid
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
# Script body: load the Stable Diffusion components, optionally swap in a
# neural-compressor-quantized UNet, generate images, and save them.
# NOTE(review): assignment targets restored from obfuscated `A_` placeholders,
# matching the names the following statements reference; `parse_args` /
# `generate_images` refer to helpers defined above (renamed by obfuscation).
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# Disable the safety checker (returns images unchanged, flags nothing).
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    # Load the INT8/quantized UNet produced by neural-compressor.
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
class EditDistance:
    """Levenshtein edit distance via top-down memoized and bottom-up DP.

    NOTE(review): restored from the obfuscated original, in which all three
    methods were named ``__A`` (shadowing each other), the memo table was never
    written (every target was bound to ``A__``), and the ``__main__`` block's
    ``EditDistance()`` call could not resolve.
    """

    def __init__(self):
        # The two words being compared and the DP table, set by the public methods.
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m, n):
        """Memoized recursion: edit distance of word1[:m+1] vs word2[:n+1]."""
        if m == -1:
            # first word exhausted: insert the remaining n+1 characters
            return n + 1
        elif n == -1:
            # second word exhausted: delete the remaining m+1 characters
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                # matching characters cost nothing
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, word1, word2):
        """Return the edit distance between word1 and word2 (memoized recursion)."""
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1, word2):
        """Return the edit distance between word1 and word2 (iterative table fill)."""
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
if __name__ == "__main__":
    # Interactive demo: read two strings and print both DP results.
    # NOTE(review): variable names restored from obfuscated `A_` placeholders
    # (the original passed the same obfuscated name for both strings).
    solver = EditDistance()
    print('****************** Testing Edit Distance DP Algorithm ******************')
    print()
    S1 = input('Enter the first string: ').strip()
    S2 = input('Enter the second string: ').strip()
    print()
    print(f'''The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}''')
    print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}''')
    print()
    print('*************** End of Testing Edit Distance DP Algorithm ***************')
| 64 | 0 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def UpperCamelCase(job):
    """Extract start/end timestamps and duration (in minutes) from one GitHub
    Actions job payload.

    NOTE(review): names restored from the obfuscated ``A__`` placeholders; the
    dict keys are reconstructed to match the consumer below, which reads
    ``v["duration"]`` — confirm the remaining keys against the original script.
    """
    job_info = {}
    start = job["started_at"]
    end = job["completed_at"]
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min
    return job_info
def UpperCamelCase(workflow_run_id, token=None):
    """Fetch per-job timing info for a GitHub Actions workflow run.

    Pages through the jobs API (100 per page) and maps job name -> timing dict.
    Returns {} and prints a traceback on any failure.
    NOTE(review): names restored from obfuscated placeholders; the call to
    ``extract_time_from_single_job`` targets the helper defined above, which
    the obfuscation renamed.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}
    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        # remaining pages after the first 100 jobs
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
if __name__ == "__main__":
    # CLI: print job durations for a workflow run, longest first.
    # NOTE(review): names restored from obfuscated `A_` placeholders;
    # `get_job_time` refers to the function defined above (renamed by obfuscation).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
    args = parser.parse_args()
    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
    for k, v in job_time.items():
        print(f'''{k}: {v["duration"]}''')
def add(first: int, second: int) -> int:
    """Add two non-negative integers using only bitwise operations.

    Classic carry-propagation: AND gives the carry bits, XOR adds without
    carry, and the carry is shifted left and re-added until it is zero.
    Restored from the obfuscated original, where the carry/second updates were
    lost (the loop never terminated) and the parameter name was duplicated.
    The function is named ``add`` to match the ``__main__`` caller below.
    NOTE(review): like the original loop, this does not terminate for negative
    operands (Python ints are unbounded).
    """
    while second != 0:
        carry = first & second   # positions where both have a 1 → carry
        first ^= second          # sum without carry
        second = carry << 1      # propagate carry one bit left
    return first
if __name__ == "__main__":
    # Run the module doctests, then add two user-supplied integers.
    import doctest

    doctest.testmod()
    # NOTE(review): targets restored from obfuscated `A_` placeholders so that
    # the `add(first, second)` call below resolves.
    first = int(input('Enter the first number: ').strip())
    second = int(input('Enter the second number: ').strip())
    print(f'''{add(first, second) = }''')
| 64 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__magic_name__ )
class _a (__magic_name__ ):
    '''simple docstring'''

    # Text-classification task template: a `text` input column and a ClassLabel
    # `labels` column.
    # NOTE(review): obfuscated — `__magic_name__` replaces both the `frozen=`
    # value and the TaskTemplate base class, and the `A__` assignment targets
    # below shadow the names the code later reads (`task_template`,
    # `label_schema`). Code kept byte-for-byte; only comments were added.
    UpperCAmelCase__: str = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    UpperCAmelCase__: ClassVar[Features] = Features({'''text''': Value('''string''' )} )
    UpperCAmelCase__: ClassVar[Features] = Features({'''labels''': ClassLabel} )
    UpperCAmelCase__: str = "text"
    UpperCAmelCase__: str = "labels"

    # Validates that the dataset features contain a ClassLabel column and
    # returns a copy of the template with the label schema aligned to it.
    def __A ( self , A__ ):
        if self.label_column not in features:
            raise ValueError(F"""Column {self.label_column} is not present in features.""" )
        if not isinstance(features[self.label_column] , A__ ):
            raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
        A__ : Optional[Any] = copy.deepcopy(self )
        A__ : Tuple = self.label_schema.copy()
        A__ : List[Any] = features[self.label_column]
        A__ : str = label_schema
        return task_template

    # Mapping from the dataset's column names to the template's standard names.
    @property
    def __A ( self ):
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
| 713 |
from __future__ import annotations
from collections.abc import Callable
A_ : List[Any] = list[list[float | int]]
def UpperCamelCase(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve ``matrix * x = vector`` by Gaussian elimination with partial
    pivoting; return the solution as a column matrix, entries rounded to 10
    decimal places.

    NOTE(review): restored from the obfuscated original, where every target
    (``augmented``, ``ratio``, the row swap) was bound to ``A__`` and the loop
    bounds were collapsed to ``lowercase_``.
    """
    size: int = len(matrix)
    # augmented matrix [matrix | vector]
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting: choose the row with the largest |entry| in this column
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        # eliminate this column from all rows below the pivot
        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def UpperCamelCase(data_points: list[int]) -> Callable[[int], int]:
    """Return the polynomial interpolating ``data_points`` at x = 1..len(data_points).

    Builds a Vandermonde system, solves for the coefficients, and returns a
    closure evaluating the polynomial (coefficients rounded to integers).
    NOTE(review): the call to ``solve`` targets the Gaussian-elimination
    function defined above, which the obfuscation renamed.
    """
    size: int = len(data_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(data_points):
        for col in range(size):
            # Vandermonde row: (x+1)^(size-1), ..., (x+1)^0
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size))

    return interpolated_func
def UpperCamelCase(variable: int) -> int:
    """Project Euler 101 generating polynomial u(n) = 1 - n + n^2 - ... + n^10.

    The parameter name is restored to ``variable`` — the obfuscated signature
    used ``lowercase_`` while the body referenced ``variable`` (NameError).
    """
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def UpperCamelCase(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum of the first incorrect terms (FITs) of the optimal polynomials
    fitted to prefixes of ``func`` (Project Euler 101).

    For each prefix length k, interpolate a degree-(k-1) polynomial and add its
    value at the first point where it disagrees with ``func``.
    NOTE(review): ``question_function`` and ``interpolate`` refer to the
    functions defined above, which the obfuscation renamed.
    """
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        # walk forward until the fitted polynomial first disagrees with func
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret
if __name__ == "__main__":
    # Script entry point: report the computed Project Euler answer.
    answer = f"{solution() = }"
    print(answer)
| 64 | 0 |
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Instantiate a seq2seq model from config only (random weights) and save it
    together with the matching tokenizer.

    Renamed from the obfuscated ``UpperCamelCase`` so that the
    ``fire.Fire(save_randomly_initialized_version)`` entry point below resolves;
    parameter names restored from the duplicate ``lowercase_`` placeholders.
    Returns the freshly initialized model.
    """
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeqaSeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
    # Expose the function above as a command-line interface via python-fire.
    fire.Fire(save_randomly_initialized_version)
| 714 |
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """Return ``num!`` (memoized recursion).

    Raises ValueError for negative input. The name is restored to ``factorial``
    — the obfuscated rename broke the recursive self-call in the body.
    """
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 64 | 0 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"

# Build
# NOTE(review): assignment targets restored from obfuscated `A_` placeholders,
# matching the names the statements below reference (`mname`, `config`,
# `tiny_model`, `batch`, `outputs`, `mname_tiny`).
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration

mname = "facebook/wmt19-en-de"
tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
# shrink every structural dimension to the minimum
config.update(
    dict(
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))

# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-de
| 715 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Minimal Beam-based builder with a flat string ``content`` feature.

    Restored from the obfuscated original, in which all three builder hooks
    were named ``__A`` (so only the last survived) and the class name ``_a``
    did not match the ``DummyBeamDataset`` references in the tests below. The
    method names are the ``BeamBasedBuilder`` override points.
    """

    def _info(self):
        # supervised_keys is intentionally unset (the obfuscated value was `A__`).
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        # One TRAIN split fed from the in-memory dummy examples.
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Beam-based builder with a nested ``{"a": {"b": [str]}}`` feature.

    Restored from the obfuscated original for the same reasons as
    ``DummyBeamDataset`` above: the three ``__A`` methods shadowed each other
    and the class name did not match the test references.
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    """Three (key, example) pairs with a flat string ``content`` field.

    Renamed from the obfuscated ``UpperCamelCase`` so the builder and test
    references to ``get_test_dummy_examples`` resolve.
    """
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]
def get_test_nested_examples():
    """Three (key, example) pairs with a nested ``{"a": {"b": [str]}}`` field.

    Renamed from the obfuscated ``UpperCamelCase`` so the builder and test
    references to ``get_test_nested_examples`` resolve.
    """
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class _a (__magic_name__ ):
    '''simple docstring'''

    # NOTE(review): obfuscated test-case class — `__magic_name__` replaces the
    # original TestCase base, and the `A__` assignment targets shadow the names
    # the assertions reference (`tmp_cache_dir`, `builder`, `dset`,
    # `expected_num_examples`). Code kept byte-for-byte; comments only.

    # End-to-end: build the dummy Beam dataset with the DirectRunner and check
    # the on-disk arrow/json artifacts and the TRAIN split contents.
    @require_beam
    def __A ( self ):
        A__ : Dict = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            A__ : int = DummyBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
            A__ : int = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows , A__ )
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
            self.assertDictEqual(dset["""train"""][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset["""train"""][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
            del dset

    # Same build but with WriteToParquet patched to produce 2 shards; order is
    # not preserved when sharding, so contents are compared as sorted lists.
    @require_beam
    def __A ( self ):
        import apache_beam as beam

        A__ : int = beam.io.parquetio.WriteToParquet
        A__ : List[str] = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            A__ : str = DummyBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
            with patch("""apache_beam.io.parquetio.WriteToParquet""" ) as write_parquet_mock:
                A__ : Optional[Any] = partial(A__ , num_shards=2 )
                builder.download_and_prepare()
            # NOTE(review): both existence checks below test the same shard file
            # name (train-00000-of-00002); the second was likely meant to check
            # shard 00001 — kept byte-for-byte.
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
            A__ : Optional[int] = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows , A__ )
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["""train"""]["""content"""] ) , sorted(["""foo""", """bar""", """foobar"""] ) )
            self.assertTrue(
                os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
            del dset

    # Building without a beam runner must raise MissingBeamOptions.
    @require_beam
    def __A ( self ):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            A__ : int = DummyBeamDataset(cache_dir=A__ )
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )

    # Same end-to-end check as the first test, for the nested-feature dataset.
    @require_beam
    def __A ( self ):
        A__ : List[Any] = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            A__ : Optional[int] = NestedBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) )
            A__ : Optional[int] = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows , A__ )
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
            self.assertDictEqual(dset["""train"""][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset["""train"""][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
            del dset
| 64 | 0 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _a (__magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Any = TextToVideoSDPipeline
UpperCAmelCase__: Any = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase__: Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
UpperCAmelCase__: Optional[int] = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def __A ( self ):
torch.manual_seed(0 )
A__ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
A__ : Optional[int] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A__ , set_alpha_to_one=A__ , )
torch.manual_seed(0 )
A__ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
A__ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
A__ : Union[str, Any] = CLIPTextModel(A__ )
A__ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A__ : Dict = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def __A ( self , A__ , A__=0 ):
if str(A__ ).startswith("""mps""" ):
A__ : Tuple = torch.manual_seed(A__ )
else:
A__ : List[str] = torch.Generator(device=A__ ).manual_seed(A__ )
A__ : List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def __A ( self ):
A__ : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
A__ : Union[str, Any] = self.get_dummy_components()
A__ : Union[str, Any] = TextToVideoSDPipeline(**A__ )
A__ : int = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
A__ : int = self.get_dummy_inputs(A__ )
A__ : int = """np"""
A__ : Any = sd_pipe(**A__ ).frames
A__ : Dict = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
A__ : Optional[Any] = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A__ , expected_max_diff=3e-3 )
    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def __A ( self ):
        # xFormers attention output must match default attention within 1e-2.
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A__ , expected_max_diff=1e-2 )
    @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
    def __A ( self ):
        # Intentionally skipped batching test (see decorator reason).
        pass
    @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
    def __A ( self ):
        # Intentionally skipped batching test (see decorator reason).
        pass
    @unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
    def __A ( self ):
        # Intentionally skipped: num_images_per_prompt unsupported here.
        pass
    def __A ( self ):
        # Delegate to the mixin's progress-bar test unchanged.
        return super().test_progress_bar()
@slow
@skip_mps
class _a (unittest.TestCase ):
    """Slow, CUDA-only integration tests for `TextToVideoSDPipeline`.

    NOTE(review): both methods are named `__A` (the second shadows the first)
    and locals are uniformly `A__`, so `pipe`, `video_frames`, `expected_video`
    and `video` are never actually bound — looks machine-mangled; verify
    against the original integration test.
    """
    def __A ( self ):
        # 25-step DPMSolver generation compared against a reference video tensor.
        A__ : Union[str, Any] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
        A__ : Tuple = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
        A__ : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        A__ : int = pipe.to("""cuda""" )
        A__ : Optional[Any] = """Spiderman is surfing"""
        A__ : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
        A__ : Optional[Any] = pipe(A__ , generator=A__ , num_inference_steps=25 , output_type="""pt""" ).frames
        A__ : Dict = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2
    def __A ( self ):
        # Cheaper 2-step generation compared against its own 2-step reference.
        A__ : List[Any] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
        A__ : Optional[int] = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
        A__ : List[str] = pipe.to("""cuda""" )
        A__ : Dict = """Spiderman is surfing"""
        A__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
        A__ : Optional[int] = pipe(A__ , generator=A__ , num_inference_steps=2 , output_type="""pt""" ).frames
        A__ : Optional[int] = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2
| 716 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
A_ : Union[str, Any] = logging.get_logger(__name__)
class _a (PoolFormerImageProcessor ):
    """Deprecated alias for :class:`PoolFormerImageProcessor`.

    Kept only for backward compatibility: instantiating it warns and then
    behaves exactly like the new image-processor class. The original block
    declared ``*A__, **A__`` (duplicate names, a SyntaxError), passed the
    args tuple as the warning *category*, and inherited from the undefined
    ``__magic_name__`` — the imported ``PoolFormerImageProcessor`` is the
    evident intended base.
    """

    def __init__(self, *args, **kwargs):
        # Category must be a Warning subclass; FutureWarning matches the
        # deprecation policy stated in the message.
        warnings.warn(
            """The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use PoolFormerImageProcessor instead.""",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 64 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
A_ : Tuple = logging.getLogger(__name__)
@dataclass
class _a :
    """Arguments pointing at the model/config/tokenizer to fine-tune.

    NOTE(review): every field below is declared under the same mangled name
    `UpperCAmelCase__`, so only the last declaration survives and `main()`'s
    reads of `model_name_or_path`, `cache_dir`, `use_auth_token`, etc. cannot
    resolve — looks machine-mangled; verify against the original run_swag.py.
    """
    # Intended field (per metadata): model_name_or_path
    UpperCAmelCase__: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    # Intended field (per metadata): config_name
    UpperCAmelCase__: Optional[str] = field(
        default=__magic_name__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    # Intended field (per metadata): tokenizer_name
    UpperCAmelCase__: Optional[str] = field(
        default=__magic_name__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    # Intended field (per metadata): cache_dir
    UpperCAmelCase__: Optional[str] = field(
        default=__magic_name__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    # Intended field (per metadata): use_fast_tokenizer
    UpperCAmelCase__: bool = field(
        default=__magic_name__ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
    # Intended field (per metadata): model_revision
    UpperCAmelCase__: str = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    # Intended field (per metadata): use_auth_token
    UpperCAmelCase__: bool = field(
        default=__magic_name__ , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
@dataclass
class _a :
    """Arguments describing the training/evaluation data and preprocessing.

    NOTE(review): every field is declared under the same mangled name
    `UpperCAmelCase__`, so only the last one survives and the reads of
    `self.train_file` / `self.validation_file` in `__A` cannot resolve —
    looks machine-mangled; verify against the original run_swag.py.
    """
    # Intended fields (per metadata): train_file, validation_file,
    # overwrite_cache, preprocessing_num_workers, max_seq_length,
    # pad_to_max_length, max_train_samples, max_eval_samples.
    UpperCAmelCase__: Optional[str] = field(default=__magic_name__ , metadata={'''help''': '''The input training data file (a text file).'''} )
    UpperCAmelCase__: Optional[str] = field(
        default=__magic_name__ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
    UpperCAmelCase__: bool = field(
        default=__magic_name__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
    UpperCAmelCase__: Optional[int] = field(
        default=__magic_name__ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
    UpperCAmelCase__: Optional[int] = field(
        default=__magic_name__ , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. If passed, sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    UpperCAmelCase__: bool = field(
        default=__magic_name__ , metadata={
            '''help''': (
                '''Whether to pad all samples to the maximum sentence length. '''
                '''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
                '''efficient on GPU but very bad for TPU.'''
            )
        } , )
    UpperCAmelCase__: Optional[int] = field(
        default=__magic_name__ , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    UpperCAmelCase__: Optional[int] = field(
        default=__magic_name__ , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )
    def __A ( self ):
        # Post-init style validation: any provided data files must be csv/json.
        # NOTE(review): `extension` is never bound (assigned to `A__`) — mangled.
        if self.train_file is not None:
            A__ : int = self.train_file.split(""".""" )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            A__ : Any = self.validation_file.split(""".""" )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class _a :
    """Data collator that dynamically pads multiple-choice batches.

    Each incoming feature holds ``num_choices`` tokenized sequences; they are
    flattened to ``(batch * choices, seq)`` so a single ``tokenizer.pad`` call
    can pad them, then reshaped back to ``(batch, choices, seq)``.

    Fixes vs. the original block:
    - the four fields were all declared under the mangled name
      ``UpperCAmelCase__`` while ``__call__`` (and the construction site,
      ``DataCollatorForMultipleChoice(tokenizer=..., pad_to_multiple_of=...)``)
      use the real names restored here;
    - ``torch.intaa`` does not exist — the labels dtype is ``torch.int64``;
    - the labels tensor was computed but never put back into the batch.
    (Annotations are strings so the class is importable without the
    transformers type objects.)
    """

    tokenizer: "PreTrainedTokenizerBase"
    padding: "Union[bool, str, PaddingStrategy]" = True
    max_length: "Optional[int]" = None
    pad_to_multiple_of: "Optional[int]" = None

    def __call__(self, features):
        # Labels may arrive under either key depending on the dataset.
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        # Flatten (batch, choices, ...) -> (batch * choices, ...).
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))
        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
def UpperCamelCase () -> Tuple:
    """Fine-tune a multiple-choice model on SWAG (or user CSV/JSON files).

    Parses model/data/training arguments, configures logging and checkpoint
    resumption, loads and tokenizes the dataset, then trains/evaluates with
    `Trainer` and optionally pushes to the Hub.

    NOTE(review): locals are uniformly named `A__` and immediately clobbered,
    so later reads of `parser`, `raw_datasets`, `tokenizer`, `model`,
    `max_seq_length`, `train_dataset`, `eval_dataset`, `data_collator`,
    `trainer`, `metrics`, `kwargs`, etc. cannot resolve — looks
    machine-mangled; the control flow is documented as written.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    A__ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        A__ : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        A__ : Any = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_swag""" , lowercase_ , lowercase_ )
    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    A__ : Any = training_args.get_process_log_level()
    logger.setLevel(lowercase_ )
    datasets.utils.logging.set_verbosity(lowercase_ )
    transformers.utils.logging.set_verbosity(lowercase_ )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
    logger.info(f"""Training/evaluation parameters {training_args}""" )
    # Detecting last checkpoint.
    A__ : int = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        A__ : str = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                """Use --overwrite_output_dir to overcome.""" )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        A__ : Union[str, Any] = {}
        if data_args.train_file is not None:
            A__ : List[str] = data_args.train_file
        if data_args.validation_file is not None:
            A__ : Optional[Any] = data_args.validation_file
        A__ : List[Any] = data_args.train_file.split(""".""" )[-1]
        A__ : Any = load_dataset(
            lowercase_ , data_files=lowercase_ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        # Downloading and loading the swag dataset from the hub.
        A__ : int = load_dataset(
            """swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # Load pretrained model and tokenizer
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    A__ : Dict = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    A__ : int = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    A__ : List[Any] = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    A__ : Optional[int] = [f"""ending{i}""" for i in range(4 )]
    A__ : List[str] = """sent1"""
    A__ : List[str] = """sent2"""
    if data_args.max_seq_length is None:
        A__ : Tuple = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                """The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
                """ of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
                """ override this default with `--block_size xxx`.""" )
            A__ : Optional[Any] = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
                f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
        A__ : List[Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
    # Preprocessing the datasets.
    def preprocess_function(lowercase_: Dict ):
        # Repeat each context 4 times (one per ending), pair with each ending,
        # tokenize the flat list, then regroup into blocks of 4 choices.
        A__ : Union[str, Any] = [[context] * 4 for context in examples[context_name]]
        A__ : str = examples[question_header_name]
        A__ : List[str] = [
            [f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(lowercase_ )
        ]
        # Flatten out
        A__ : int = list(chain(*lowercase_ ) )
        A__ : Any = list(chain(*lowercase_ ) )
        # Tokenize
        A__ : Optional[Any] = tokenizer(
            lowercase_ , lowercase_ , truncation=lowercase_ , max_length=lowercase_ , padding="""max_length""" if data_args.pad_to_max_length else False , )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 , len(lowercase_ ) , 4 )] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("""--do_train requires a train dataset""" )
        A__ : Dict = raw_datasets["""train"""]
        if data_args.max_train_samples is not None:
            # Optionally truncate the training set for quick debugging runs.
            A__ : int = min(len(lowercase_ ) , data_args.max_train_samples )
            A__ : str = train_dataset.select(range(lowercase_ ) )
        with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
            A__ : str = train_dataset.map(
                lowercase_ , batched=lowercase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("""--do_eval requires a validation dataset""" )
        A__ : Dict = raw_datasets["""validation"""]
        if data_args.max_eval_samples is not None:
            A__ : List[Any] = min(len(lowercase_ ) , data_args.max_eval_samples )
            A__ : int = eval_dataset.select(range(lowercase_ ) )
        with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
            A__ : Dict = eval_dataset.map(
                lowercase_ , batched=lowercase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
    # Data collator
    A__ : int = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=lowercase_ , pad_to_multiple_of=8 if training_args.fpaa else None )
    )
    # Metric
    def compute_metrics(lowercase_: str ):
        # Accuracy over the argmax of the 4-way choice logits.
        A__ : Union[str, Any] = eval_predictions
        A__ : Dict = np.argmax(lowercase_ , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
    # Initialize our Trainer
    A__ : Union[str, Any] = Trainer(
        model=lowercase_ , args=lowercase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowercase_ , data_collator=lowercase_ , compute_metrics=lowercase_ , )
    # Training
    if training_args.do_train:
        A__ : str = None
        if training_args.resume_from_checkpoint is not None:
            A__ : Optional[int] = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            A__ : List[Any] = last_checkpoint
        A__ : Optional[int] = trainer.train(resume_from_checkpoint=lowercase_ )
        trainer.save_model() # Saves the tokenizer too for easy upload
        A__ : Tuple = train_result.metrics
        A__ : str = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase_ )
        )
        A__ : int = min(lowercase_ , len(lowercase_ ) )
        trainer.log_metrics("""train""" , lowercase_ )
        trainer.save_metrics("""train""" , lowercase_ )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        A__ : Any = trainer.evaluate()
        A__ : Dict = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase_ )
        A__ : Union[str, Any] = min(lowercase_ , len(lowercase_ ) )
        trainer.log_metrics("""eval""" , lowercase_ )
        trainer.save_metrics("""eval""" , lowercase_ )
    A__ : Union[str, Any] = {
        """finetuned_from""": model_args.model_name_or_path,
        """tasks""": """multiple-choice""",
        """dataset_tags""": """swag""",
        """dataset_args""": """regular""",
        """dataset""": """SWAG""",
        """language""": """en""",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**lowercase_ )
    else:
        trainer.create_model_card(**lowercase_ )
def UpperCamelCase (lowercase_: Optional[Any] ) -> Tuple:
    """Entry point used by `xla_spawn` (TPUs); the process-index arg is unused."""
    # For xla_spawn (TPUs)
    # NOTE(review): `main` is undefined in this file — the training entry point
    # above is named `UpperCamelCase`; looks machine-mangled, verify.
    main()


if __name__ == "__main__":
    main()
| 717 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
A_ : Any = logging.getLogger(__name__)
def UpperCamelCase(a: int = 2, b: int = 3, batch_size: int = 16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Build (train, valid) DataLoaders for the linear toy task y = a*x + b + noise.

    The original signature declared all five parameters under the same name
    ``lowercase_`` (a SyntaxError) and the body referenced undefined names;
    the parameter names/defaults here restore the intended interface, which
    is backward-compatible with the zero-argument call sites below.

    Returns:
        Tuple of (train_dataloader, valid_dataloader); the training loader
        shuffles, the validation loader does not.
    """

    def get_dataset(n_batches):
        # x ~ N(0, 1); targets are a noisy affine function of x.
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def UpperCamelCase(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Run a short MSE training loop and return the random draws made per batch.

    The original declared six parameters all named ``lowercase_`` (a
    SyntaxError) while the body read the real names; this restores them,
    matching the positional call sites ``train(3, model, dataloader,
    optimizer, accelerator[, scheduler])`` below.

    Returns:
        List of `random.random()` values, one per processed batch — used by
        the checkpointing tests to compare RNG state across save/load.
    """
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            # StepLR in these tests steps once per epoch.
            scheduler.step()
    return rands
class _a (nn.Module ):
    """Tiny linear model y = a * x + b with scalar learnable parameters."""

    def __init__(self):
        super().__init__()
        # The original bound the parameters to throwaway locals (`A__`), so
        # the forward pass crashed on `self.a`; register them on the module.
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        # Named `forward` (the original used `__A`) so that `model(x)` — as
        # used throughout the tests below — dispatches correctly.
        return x * self.a + self.b
class _a (unittest.TestCase ):
    """Checkpoint save/load tests for `accelerate` (`save_state`/`load_state`).

    NOTE(review): every method is named `__A`, so earlier test methods are
    shadowed and only the last definition is visible to unittest; locals are
    uniformly `A__`, so reads like `model`, `accelerator`, `optimizer`,
    `scheduler`, `test_rands` below cannot resolve — looks machine-mangled.
    The intent of each method is noted inline as written.
    """
    def __A ( self ):
        # With total_limit=1, saving twice must keep only one checkpoint dir.
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : Optional[Any] = DummyModel()
            A__ : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ , A__ : str = dummy_dataloaders()
            A__ : Dict = ProjectConfiguration(total_limit=1 , project_dir=A__ , automatic_checkpoint_naming=A__ )
            # Train baseline
            A__ : List[str] = Accelerator(project_config=A__ )
            A__ , A__ , A__ , A__ : Any = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
    def __A ( self ):
        # Explicit-path checkpointing: save, train, reload, and verify model
        # params / optimizer state / RNG draws round-trip exactly.
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : str = DummyModel()
            A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ , A__ : int = dummy_dataloaders()
            # Train baseline
            A__ : str = Accelerator()
            A__ , A__ , A__ , A__ : List[str] = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            # Save initial
            A__ : List[Any] = os.path.join(A__ , """initial""" )
            accelerator.save_state(A__ )
            ((A__) , (A__)) : str = model.a.item(), model.b.item()
            A__ : Dict = optimizer.state_dict()
            A__ : List[str] = train(3 , A__ , A__ , A__ , A__ )
            ((A__) , (A__)) : str = model.a.item(), model.b.item()
            A__ : Any = optimizer.state_dict()
            # Train partially
            set_seed(42 )
            A__ : Optional[int] = DummyModel()
            A__ : Dict = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ , A__ : Dict = dummy_dataloaders()
            A__ : List[str] = Accelerator()
            A__ , A__ , A__ , A__ : Optional[Any] = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            accelerator.load_state(A__ )
            ((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
            A__ : Union[str, Any] = optimizer.state_dict()
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            A__ : List[str] = train(2 , A__ , A__ , A__ , A__ )
            # Save everything
            A__ : Optional[int] = os.path.join(A__ , """checkpoint""" )
            accelerator.save_state(A__ )
            # Load everything back in and make sure all states work
            accelerator.load_state(A__ )
            test_rands += train(1 , A__ , A__ , A__ , A__ )
            ((A__) , (A__)) : Union[str, Any] = model.a.item(), model.b.item()
            A__ : Optional[int] = optimizer.state_dict()
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
    def __A ( self ):
        # Same round-trip as above, but with automatic checkpoint naming
        # (checkpoints/checkpoint_N directories).
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : int = DummyModel()
            A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ , A__ : List[str] = dummy_dataloaders()
            A__ : str = ProjectConfiguration(automatic_checkpoint_naming=A__ )
            # Train baseline
            A__ : Any = Accelerator(project_dir=A__ , project_config=A__ )
            A__ , A__ , A__ , A__ : str = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            # Save initial
            accelerator.save_state()
            ((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
            A__ : int = optimizer.state_dict()
            A__ : int = train(3 , A__ , A__ , A__ , A__ )
            ((A__) , (A__)) : Optional[Any] = model.a.item(), model.b.item()
            A__ : Any = optimizer.state_dict()
            # Train partially
            set_seed(42 )
            A__ : Dict = DummyModel()
            A__ : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ , A__ : Union[str, Any] = dummy_dataloaders()
            A__ : List[Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A__ )
            A__ : Dict = Accelerator(project_dir=A__ , project_config=A__ )
            A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
            ((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
            A__ : Tuple = optimizer.state_dict()
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            A__ : str = train(2 , A__ , A__ , A__ , A__ )
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_1""" ) )
            test_rands += train(1 , A__ , A__ , A__ , A__ )
            ((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
            A__ : List[Any] = optimizer.state_dict()
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
    def __A ( self ):
        # register_for_checkpointing must reject plain tensors (items 0/1)
        # while accepting the model and optimizer (items 2/3).
        A__ : Union[str, Any] = torch.tensor([1, 2, 3] )
        A__ : int = torch.tensor([2, 3, 4] )
        A__ : List[Any] = DummyModel()
        A__ : List[Any] = torch.optim.Adam(net.parameters() )
        A__ : Tuple = Accelerator()
        with self.assertRaises(A__ ) as ve:
            accelerator.register_for_checkpointing(A__ , A__ , A__ , A__ )
        A__ : Any = str(ve.exception )
        self.assertTrue("""Item at index 0""" in message )
        self.assertTrue("""Item at index 1""" in message )
        self.assertFalse("""Item at index 2""" in message )
        self.assertFalse("""Item at index 3""" in message )
    def __A ( self ):
        # LR-scheduler state must round-trip through save_state/load_state.
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : Any = DummyModel()
            A__ : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ : Dict = torch.optim.lr_scheduler.StepLR(A__ , step_size=1 , gamma=0.9_9 )
            A__ , A__ : List[Any] = dummy_dataloaders()
            A__ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=A__ )
            # Train baseline
            A__ : Optional[Any] = Accelerator(project_dir=A__ , project_config=A__ )
            A__ , A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
                A__ , A__ , A__ , A__ , A__ )
            # Save initial
            accelerator.save_state()
            A__ : Tuple = scheduler.state_dict()
            train(3 , A__ , A__ , A__ , A__ , A__ )
            self.assertNotEqual(A__ , scheduler.state_dict() )
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
            self.assertEqual(A__ , scheduler.state_dict() )
    def __A ( self ):
        # With total_limit=2, only the two most recent of 11 checkpoints remain.
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : Optional[Any] = DummyModel()
            A__ : int = ProjectConfiguration(automatic_checkpoint_naming=A__ , total_limit=2 )
            # Train baseline
            A__ : List[str] = Accelerator(project_dir=A__ , project_config=A__ )
            A__ : Union[str, Any] = accelerator.prepare(A__ )
            # Save 3 states:
            for _ in range(11 ):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) ) )
            self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_9""" ) ) )
            self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_10""" ) ) )
    @require_cuda
    def __A ( self ):
        # Re-launch this file under torchrun to exercise the multi-GPU
        # __main__ path below.
        A__ : Dict = ["""torchrun""", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
        execute_subprocess_async(A__ , env=os.environ.copy() )
if __name__ == "__main__":
    # Distributed smoke test, driven by the torchrun launch in the
    # @require_cuda test above: verifies optimizer-state device placement
    # across save_state/load_state for map_location 'cpu', 'on_device',
    # and that an invalid map_location raises TypeError.
    A_ : List[str] = '/tmp/accelerate/state_checkpointing'
    A_ : Optional[Any] = DummyModel()
    A_ : Union[str, Any] = torch.optim.Adam(params=model.parameters(), lr=1E-3)
    A_ : str = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    A_ , A_ : List[Any] = dummy_dataloaders()
    A_ : int = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    A_ : List[str] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    A_ , A_ , A_ , A_ , A_ : List[Any] = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    A_ , A_ : Dict = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        A_ : str = group['params'][0].device
        break
    assert param_device.type == accelerator.device.type
    A_ : Optional[Any] = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()
    # Check CPU state
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
    for group in optimizer.param_groups:
        A_ : str = group['params'][0].device
        break
    assert (
        param_device.type == torch.device('cpu').type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
    for group in optimizer.param_groups:
        A_ : Tuple = group['params'][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
    # Check error
    with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
        accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
| 64 | 0 |
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def UpperCamelCase(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Apply to `img` the affine transform mapping the 3 points `pt1` to `pt2`.

    The original declared all five parameters under the same name
    ``lowercase_`` (a SyntaxError) while the body read ``rows``/``cols``;
    distinct names are restored here, matching the positional call sites
    ``get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols)`` below.

    Args:
        img: source image (2-D grayscale array here).
        pt1: three source points, shape (3, 2), float32.
        pt2: three destination points, shape (3, 2), float32.
        rows, cols: output size passed to ``cv2.warpAffine``.
    """
    rotation_matrix = cva.getAffineTransform(pt1, pt2)
    return cva.warpAffine(img, rotation_matrix, (rows, cols))
if __name__ == "__main__":
    # Demo: load the Lena test image, apply three different affine maps, and
    # show the four images in a 2x2 matplotlib grid.
    # NOTE(review): `np.floataa` and the four identically-named `ptsa` arrays
    # (only the last binding survives) look machine-mangled — presumably
    # np.float32 and pts1/pts2 pairs; verify against the original script.
    # read original image
    A_ : List[Any] = cva.imread(
        str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
    )
    # turn image in gray scale value
    A_ : List[Any] = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    A_ : Optional[Any] = gray_img.shape
    # set different points to rotate image
    A_ : str = np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
    A_ : Dict = np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
    A_ : Optional[int] = np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
    A_ : Optional[int] = np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
    # add all rotated images in a list
    A_ : Dict = [
        gray_img,
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
    ]
    # plot different image rotations
    A_ : Union[str, Any] = plt.figure(1)
    A_ : Union[str, Any] = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
        plt.title(titles[i])
        plt.axis('off')
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 718 |
def UpperCamelCase(a: str, b: str) -> bool:
    """Return True if `a` can be abbreviated to `b`.

    Allowed moves: capitalize any lowercase letter of `a` (matching the next
    uppercase letter of `b`), and delete any remaining lowercase letters.
    Classic DP where ``dp[i][j]`` means the first ``i`` chars of `a` can
    produce the first ``j`` chars of `b`.

    The original block declared both parameters as ``lowercase_`` (a
    SyntaxError) and read/assigned unbound names (`n`, `m`, `dp`, `a`, `b`);
    this restores the intended algorithm.
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True  # empty prefix of `a` yields empty prefix of `b`
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # Capitalize a[i] to match b[j].
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # Delete a[i] (only lowercase letters may be deleted).
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
    # Run any doctests defined in this module.
    import doctest

    doctest.testmod()
| 64 | 0 |
import warnings
from .generation import TFGenerationMixin
class _a(TFGenerationMixin):
    """Deprecated re-export of `TFGenerationMixin` from its old module path.

    Importing from here still works but emits a FutureWarning at import
    time. The original block inherited from the undefined ``__magic_name__``
    and also passed it as the warning *category*; the imported
    ``TFGenerationMixin`` base and ``FutureWarning`` category are restored.
    """

    # Executed once, when the class body is evaluated at import time.
    warnings.warn(
        '''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '''
        '''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''',
        FutureWarning,
    )
| 719 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
A_ : Dict = random.Random()
if is_torch_available():
import torch
def UpperCamelCase(shape, scale=1.0, rng=None, name=None):
    """Create a ``shape[0] x shape[1]`` nested list of floats in [0, scale).

    The original declared duplicate ``lowercase_`` parameters (a SyntaxError)
    while the body read `rng`, `shape` and `scale`; distinct names are
    restored. ``name`` is accepted for call-site compatibility and unused.

    Args:
        shape: (rows, cols) of the nested list to build.
        scale: multiplier applied to each uniform draw.
        rng: optional `random.Random`; defaults to the module-level `global_rng`.
    """
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class _a (unittest.TestCase ):
    """Holds AST feature-extractor hyper-parameters and builds raw inputs.

    Fixes vs. the original block:
    - ``__init__`` declared nine parameters all named ``A__`` (a SyntaxError)
      and bound them to throwaway locals; names/defaults are restored from the
      assignment order and the tester conventions in this file.
    - Both methods were named ``__A`` (the second shadowed the first) while
      the test below calls ``prepare_feat_extract_dict()``; the intended
      method names are restored.
    - ``np.asarray(A__)`` read an unbound name; it now converts each input.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Per-sample length increment so the batch spans [min, max) lengths.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        # Kwargs dict consumed by the feature-extraction test mixin.
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Build raw float waveforms: equal-length or linearly increasing."""

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class _a (__magic_name__ , unittest.TestCase ):
    """Unit tests for ASTFeatureExtractor.

    NOTE(review): mechanically obfuscated block — results are bound to
    ``A__`` while the following lines read descriptive names such as
    ``feat_extract`` and ``np_speech_inputs``, and the base class
    ``__magic_name__`` is undefined (presumably the sequence feature
    extraction test mixin imported above). ``np.floataa`` /
    ``torch.floataa`` likewise look like mangled ``float32``. Code left
    byte-identical; comments only — confirm against the upstream test file.
    """
    # Feature-extractor class under test (read by the mixin as
    # `self.feature_extraction_class`).
    UpperCAmelCase__: int = ASTFeatureExtractor
    def __A ( self ):
        # setUp: build the helper tester (presumably meant to be bound to
        # `self.feat_extract_tester`, which later tests read).
        A__ : Optional[Any] = ASTFeatureExtractionTester(self )
    def __A ( self ):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        A__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        A__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        A__ : Optional[Any] = [np.asarray(A__ ) for speech_input in speech_inputs]
        # Test not batched input
        A__ : Tuple = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
        A__ : Tuple = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
        self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
        # Test batched
        A__ : Tuple = feat_extract(A__ , padding=A__ , return_tensors="""np""" ).input_values
        A__ : Tuple = feat_extract(A__ , padding=A__ , return_tensors="""np""" ).input_values
        for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
            self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        A__ : int = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        A__ : List[str] = np.asarray(A__ )
        A__ : Union[str, Any] = feat_extract(A__ , return_tensors="""np""" ).input_values
        A__ : Optional[Any] = feat_extract(A__ , return_tensors="""np""" ).input_values
        for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
            self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
    @require_torch
    def __A ( self ):
        # Padding should return float32 values for both numpy and torch tensors.
        import torch
        A__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        A__ : Tuple = np.random.rand(100 ).astype(np.floataa )
        A__ : Tuple = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            A__ : List[str] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            A__ : Any = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
    def __A ( self , A__ ):
        # Load raw waveforms from the dummy LibriSpeech dataset; called below
        # as `self._load_datasamples(1)` (original name, presumably).
        from datasets import load_dataset
        A__ : str = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
        # automatic decoding with librispeech
        A__ : str = ds.sort("""id""" ).select(range(A__ ) )[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]
    @require_torch
    def __A ( self ):
        # Integration test: extracted values must match this reference slice.
        # fmt: off
        A__ : Optional[Any] = torch.tensor(
            [-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
             -1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
             -1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
             -0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] )
        # fmt: on
        A__ : Any = self._load_datasamples(1 )
        A__ : Tuple = ASTFeatureExtractor()
        A__ : Dict = feature_extractor(A__ , return_tensors="""pt""" ).input_values
        self.assertEquals(input_values.shape , (1, 1024, 128) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , A__ , atol=1e-4 ) )
| 64 | 0 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def UpperCamelCase () -> None:
    """CLI entry point: parse TensorFlowBenchmarkArguments and run the benchmark.

    Raises:
        ValueError: with a friendlier message when deprecated ``--no_*`` flags
            are used, or with the original parser error for unknown flags.

    NOTE(review): the original body passed an undefined name ``lowercase_``
    everywhere; restored to the names imported at the top of this file.
    """
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        # Parsed a second time so deprecated `--no_*` flags surface as a
        # ValueError that we can translate into a clearer message.
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        # NOTE(review): eval() on text lifted from the exception message —
        # expected to be a list literal of flag names; confirm this is safe.
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
    # Run the CLI entry point defined above (bound as `UpperCamelCase` in this
    # file; the original guard called an undefined `main`).
    UpperCamelCase()
| 720 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _a (__magic_name__ , __magic_name__ , __magic_name__ ):
    """Text decoder: a GPT-2 LM head preceded by linear encode/decode layers
    that map an external "prefix" embedding into GPT-2's embedding space.

    NOTE(review): mechanically obfuscated — every local is bound to ``A__``
    while later statements read the descriptive names those assignments were
    presumably meant to bind (``prefix_embeds``, ``scores``, ``tokens``,
    ``seq_lengths`` ...). The undefined bases ``__magic_name__`` are
    presumably ModelMixin/ConfigMixin/ModuleUtilsMixin imported above, and
    ``torch.intaa`` a mangled integer dtype. Code left byte-identical;
    comments only — confirm against the upstream implementation.
    """
    # NOTE(review): regexes over GPT-2 attention-bias buffers — presumably a
    # keys-to-ignore list for checkpoint loading; confirm the attribute name.
    UpperCAmelCase__: str = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
    @register_to_config
    def __init__( self , A__ , A__ , A__ = None , A__ = 5_0257 , A__ = 1024 , A__ = 768 , A__ = 12 , A__ = 12 , A__ = None , A__ = "gelu_new" , A__ = 0.1 , A__ = 0.1 , A__ = 0.1 , A__ = 1e-5 , A__ = 0.0_2 , A__ = True , A__ = True , A__ = False , A__ = False , ):
        """Build the encode/decode projections and the inner GPT-2 LM.

        NOTE(review): the signature declares every parameter as ``A__`` (a
        SyntaxError); the body reads ``prefix_length`` / ``prefix_inner_dim``
        / ``prefix_hidden_dim`` / ``n_embd`` plus the usual GPT2Config kwargs.
        """
        super().__init__()
        A__ : Union[str, Any] = prefix_length
        # A hidden projection is only optional when the prefix already lives
        # in the GPT-2 embedding dimension.
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                F"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
                F""" `n_embd`: {n_embd} are not equal.""" )
        A__ : str = prefix_inner_dim
        A__ : Optional[Any] = prefix_hidden_dim
        # encode_prefix: inner dim -> hidden dim (identity when no hidden dim).
        A__ : Tuple = (
            nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        # decode_prefix: hidden dim -> n_embd (identity when no hidden dim).
        A__ : int = (
            nn.Linear(self.prefix_hidden_dim , A__ ) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        A__ : Tuple = GPTaConfig(
            vocab_size=A__ , n_positions=A__ , n_embd=A__ , n_layer=A__ , n_head=A__ , n_inner=A__ , activation_function=A__ , resid_pdrop=A__ , embd_pdrop=A__ , attn_pdrop=A__ , layer_norm_epsilon=A__ , initializer_range=A__ , scale_attn_weights=A__ , use_cache=A__ , scale_attn_by_inverse_layer_idx=A__ , reorder_and_upcast_attn=A__ , )
        A__ : int = GPTaLMHeadModel(A__ )
    def __A ( self , A__ , A__ , A__ = None , A__ = None , ):
        """Forward pass: embed tokens, project the prefix, concatenate, and
        run the GPT-2 LM (with dummy-token-padded labels when training)."""
        A__ : List[str] = self.transformer.transformer.wte(A__ )
        A__ : int = self.encode_prefix(A__ )
        A__ : int = self.decode_prefix(A__ )
        # Prefix embeddings are prepended to the text embeddings.
        A__ : Optional[Any] = torch.cat((prefix_embeds, embedding_text) , dim=1 )
        if labels is not None:
            A__ : Any = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
            A__ : List[Any] = torch.cat((dummy_token, input_ids) , dim=1 )
        A__ : List[str] = self.transformer(inputs_embeds=A__ , labels=A__ , attention_mask=A__ )
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def __A ( self , A__ , A__ ):
        """Zero tokens used to pad labels over the prefix positions.
        NOTE(review): ``torch.intaa`` looks like a mangled integer dtype."""
        return torch.zeros(A__ , self.prefix_length , dtype=torch.intaa , device=A__ )
    def __A ( self , A__ ):
        """Project a prefix feature through the encode projection."""
        return self.encode_prefix(A__ )
    @torch.no_grad()
    def __A ( self , A__ , A__ , A__ ):
        """Generate one caption per feature via beam search; returns stacked
        token tensors and their sequence lengths."""
        A__ : List[Any] = torch.split(A__ , 1 , dim=0 )
        A__ : Optional[int] = []
        A__ : str = []
        for feature in features:
            A__ : Dict = self.decode_prefix(feature.to(A__ ) )  # back to the clip feature
            # Only support beam search for now
            A__ , A__ : Union[str, Any] = self.generate_beam(
                input_embeds=A__ , device=A__ , eos_token_id=A__ )
            generated_tokens.append(output_tokens[0] )
            generated_seq_lengths.append(seq_lengths[0] )
        A__ : int = torch.stack(A__ )
        A__ : List[Any] = torch.stack(A__ )
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def __A ( self , A__=None , A__=None , A__=None , A__ = 5 , A__ = 67 , A__ = 1.0 , A__ = None , ):
        """Beam search over the inner GPT-2 (defaults read like beam_size=5,
        max length 67, temperature 1.0); returns (tokens, seq_lengths) sorted
        by average log-probability, best first."""
        A__ : Any = eos_token_id
        A__ : Any = None
        A__ : Optional[int] = None
        A__ : Optional[Any] = torch.ones(A__ , device=A__ , dtype=torch.int )
        A__ : Any = torch.zeros(A__ , device=A__ , dtype=torch.bool )
        if input_embeds is not None:
            A__ : Dict = input_embeds
        else:
            A__ : str = self.transformer.transformer.wte(A__ )
        for i in range(A__ ):
            A__ : Dict = self.transformer(inputs_embeds=A__ )
            A__ : str = outputs.logits
            A__ : Union[str, Any] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            A__ : Any = logits.softmax(-1 ).log()
            if scores is None:
                # First step: seed the beams with the top-k next tokens.
                A__ , A__ : Optional[int] = logits.topk(A__ , -1 )
                A__ : List[Any] = generated.expand(A__ , *generated.shape[1:] )
                A__ , A__ : List[Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
                if tokens is None:
                    A__ : Optional[Any] = next_tokens
                else:
                    A__ : List[Any] = tokens.expand(A__ , *tokens.shape[1:] )
                    A__ : int = torch.cat((tokens, next_tokens) , dim=1 )
            else:
                # Later steps: extend every beam, keep the k best by
                # length-normalized cumulative score; stopped beams are frozen.
                A__ : Optional[int] = -float(np.inf )
                A__ : List[Any] = 0
                A__ : str = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                A__ : Dict = scores_sum / seq_lengths[:, None]
                A__ , A__ : List[Any] = scores_sum_average.view(-1 ).topk(A__ , -1 )
                A__ : Tuple = next_tokens // scores_sum.shape[1]
                A__ : Optional[Any] = seq_lengths[next_tokens_source]
                A__ : List[str] = next_tokens % scores_sum.shape[1]
                A__ : Optional[int] = next_tokens.unsqueeze(1 )
                A__ : int = tokens[next_tokens_source]
                A__ : List[Any] = torch.cat((tokens, next_tokens) , dim=1 )
                A__ : str = generated[next_tokens_source]
                A__ : Optional[Any] = scores_sum_average * seq_lengths
                A__ : Union[str, Any] = is_stopped[next_tokens_source]
            A__ : str = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
            A__ : Optional[int] = torch.cat((generated, next_token_embed) , dim=1 )
            A__ : List[str] = is_stopped + next_tokens.eq(A__ ).squeeze()
            if is_stopped.all():
                break
        A__ : Dict = scores / seq_lengths
        A__ : Dict = scores.argsort(descending=A__ )
        # tokens tensors are already padded to max_seq_length
        A__ : Union[str, Any] = [tokens[i] for i in order]
        A__ : Any = torch.stack(A__ , dim=0 )
        A__ : Dict = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
        return output_texts, seq_lengths
| 64 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazily-populated import structure: submodule name -> list of public symbols.
# NOTE(review): restored from corrupted `A_ = [...]` assignments that
# repeatedly rebound a single name instead of extending this mapping, and
# left `_import_structure` (used by _LazyModule below) undefined.
_import_structure = {
    "configuration_blenderbot_small": [
        "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotSmallConfig",
        "BlenderbotSmallOnnxConfig",
    ],
    "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}

# Each optional backend contributes its symbols only when importable.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot_small"] = [
        "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotSmallForCausalLM",
        "BlenderbotSmallForConditionalGeneration",
        "BlenderbotSmallModel",
        "BlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
        "TFBlenderbotSmallForConditionalGeneration",
        "TFBlenderbotSmallModel",
        "TFBlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
        "FlaxBlenderbotSmallForConditionalGeneration",
        "FlaxBlenderbotSmallModel",
        "FlaxBlenderbotSmallPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers resolve the real imports eagerly.
    from .configuration_blenderbot_small import (
        BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotSmallConfig,
        BlenderbotSmallOnnxConfig,
    )
    from .tokenization_blenderbot_small import BlenderbotSmallTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot_small import (
            BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotSmallForCausalLM,
            BlenderbotSmallForConditionalGeneration,
            BlenderbotSmallModel,
            BlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot_small import (
            TFBlenderbotSmallForConditionalGeneration,
            TFBlenderbotSmallModel,
            TFBlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot_small import (
            FlaxBlenderbotSmallForConditionalGeneration,
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallPreTrainedModel,
        )
else:
    import sys

    # At runtime the module is replaced by a lazy proxy that imports each
    # submodule on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 721 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
# Module logger; the JSON builder below emits its deprecation warnings and
# parse errors through `logger` (the corrupted original bound it to `A_`,
# leaving `logger` undefined).
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class _a (datasets.BuilderConfig ):
    """BuilderConfig for the JSON loader.

    NOTE(review): the corrupted original declared every field as
    ``UpperCAmelCase__`` (collapsing them into one); names restored from the
    ``self.config.*`` reads in the builder below (``features``, ``encoding``,
    ``encoding_errors``, ``field``, ``use_threads``, ``block_size``,
    ``chunksize``, ``newlines_in_values``).
    """

    # Optional explicit feature schema; inferred from the data when None.
    features: Optional[datasets.Features] = None
    # Text encoding used to read the JSON files.
    encoding: str = "utf-8"
    # Error handler passed to open()/decode() (e.g. "strict", "ignore").
    encoding_errors: Optional[str] = None
    # When set, only this top-level field of a JSON document is loaded.
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None  # no longer supported
UpperCAmelCase__: Optional[bool] = None
class _a (datasets.ArrowBasedBuilder ):
    """Arrow-based JSON / JSON-Lines dataset builder.

    NOTE(review): mechanically obfuscated — assignments bind ``A__`` while
    following lines read the intended names (``block_size``, ``dataset``,
    ``keys``, ``splits`` ...), and ``JsonConfig`` refers to the config
    dataclass above (here renamed ``_a``). Code left byte-identical;
    comments only.
    """
    UpperCAmelCase__: List[str] = JsonConfig
    def __A ( self ):
        """Validate deprecated config options and build the DatasetInfo."""
        if self.config.block_size is not None:
            logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
            A__ : Union[str, Any] = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                """The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" )
        if self.config.newlines_in_values is not None:
            raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
        return datasets.DatasetInfo(features=self.config.features )
    def __A ( self , A__ ):
        """Download/resolve data_files and map them to SplitGenerators."""
        if not self.config.data_files:
            raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        A__ : int = dl_manager.download_and_extract(self.config.data_files )
        # A bare str/list/tuple means a single unnamed split -> "train".
        if isinstance(A__ , (str, list, tuple) ):
            A__ : Optional[Any] = data_files
            if isinstance(A__ , A__ ):
                A__ : List[str] = [files]
            A__ : int = [dl_manager.iter_files(A__ ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
        A__ : List[str] = []
        for split_name, files in data_files.items():
            if isinstance(A__ , A__ ):
                A__ : Optional[int] = [files]
            A__ : Optional[int] = [dl_manager.iter_files(A__ ) for file in files]
            splits.append(datasets.SplitGenerator(name=A__ , gen_kwargs={"""files""": files} ) )
        return splits
    def __A ( self , A__ ):
        """Align a pyarrow table with the requested features schema."""
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                A__ : Optional[Any] = self.config.features.arrow_schema.field(A__ ).type
                A__ : str = pa_table.append_column(A__ , pa.array([None] * len(A__ ) , type=A__ ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            A__ : Optional[int] = table_cast(A__ , self.config.features.arrow_schema )
        return pa_table
    def __A ( self , A__ ):
        """Yield (key, pa.Table) pairs from the JSON files.

        Handles three layouts: a configured sub-``field`` of one JSON
        document, a whole-file JSON list/dict fallback, and chunked
        JSON-Lines parsing with an adaptively doubled block size."""
        for file_idx, file in enumerate(itertools.chain.from_iterable(A__ ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(A__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                    A__ : Optional[Any] = json.load(A__ )
                # We keep only the field we are interested in
                A__ : Optional[int] = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(A__ , (list, tuple) ):
                    A__ : Union[str, Any] = set().union(*[row.keys() for row in dataset] )
                    A__ : Any = {col: [row.get(A__ ) for row in dataset] for col in keys}
                else:
                    A__ : Any = dataset
                A__ : Any = pa.Table.from_pydict(A__ )
                yield file_idx, self._cast_table(A__ )
            # If the file has one json object per line
            else:
                with open(A__ , """rb""" ) as f:
                    A__ : List[str] = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    A__ : List[str] = max(self.config.chunksize // 32 , 16 << 10 )
                    A__ : Any = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
                    )
                    while True:
                        A__ : Dict = f.read(self.config.chunksize )
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(A__ )
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            A__ : List[Any] = batch.decode(self.config.encoding , errors=A__ ).encode("""utf-8""" )
                        try:
                            while True:
                                try:
                                    A__ : str = paj.read_json(
                                        io.BytesIO(A__ ) , read_options=paj.ReadOptions(block_size=A__ ) )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(A__ , pa.ArrowInvalid )
                                        and "straddling" not in str(A__ )
                                        or block_size > len(A__ )
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            F"""Batch of {len(A__ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    A__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                                    A__ : Optional[Any] = json.load(A__ )
                            except json.JSONDecodeError:
                                logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(A__ , A__ ):  # list is the only sequence type supported in JSON
                                try:
                                    A__ : str = set().union(*[row.keys() for row in dataset] )
                                    A__ : List[str] = {col: [row.get(A__ ) for row in dataset] for col in keys}
                                    A__ : int = pa.Table.from_pydict(A__ )
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
                                    raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
                                yield file_idx, self._cast_table(A__ )
                                break
                            else:
                                logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
                                raise ValueError(
                                    F"""Not able to read records in the JSON file at {file}. """
                                    F"""You should probably indicate the field of the JSON file containing your records. """
                                    F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
                                    F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(A__ )
                        batch_idx += 1
| 64 | 0 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
# Module logger. NOTE(review): bound as `A_` — presumably meant to be
# `logger` (the pylint pragma suggests the original name); nothing in this
# chunk reads it, so left unchanged.
A_ : Optional[Any] = logging.get_logger(__name__)  # pylint: disable=invalid-name
class _a (__magic_name__ ):
    """CLIP-based image encoder that projects the vision model's pooled
    output (via a mapper and layer norm) into a conditioning embedding.

    NOTE(review): obfuscated — `__init__` binds locals to ``A__`` where the
    forward pass reads ``self.model`` / ``self.mapper`` /
    ``self.final_layer_norm`` / ``self.proj_out`` / ``self.uncond_vector``;
    the undefined base ``__magic_name__`` is presumably CLIPPreTrainedModel
    (imported above) and ``PaintByExampleMapper`` the next class (here also
    renamed ``_a``). Code left byte-identical; comments only.
    """
    def __init__( self , A__ , A__=768 ):
        super().__init__(A__ )
        A__ : Tuple = proj_size
        A__ : int = CLIPVisionModel(A__ )
        A__ : Union[str, Any] = PaintByExampleMapper(A__ )
        A__ : Any = nn.LayerNorm(config.hidden_size )
        A__ : List[str] = nn.Linear(config.hidden_size , self.proj_size )
        # uncondition for scaling
        A__ : str = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
    def __A ( self , A__ , A__=False ):
        # Encode pixels with CLIP, map the pooled output, normalize, project;
        # optionally also return the learned unconditional vector.
        A__ : Optional[int] = self.model(pixel_values=A__ )
        A__ : Dict = clip_output.pooler_output
        A__ : Union[str, Any] = self.mapper(latent_states[:, None] )
        A__ : Optional[Any] = self.final_layer_norm(A__ )
        A__ : Any = self.proj_out(A__ )
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states
class _a (nn.Module ):
    """Small stack of transformer blocks that maps CLIP pooled features into
    the conditioning space (the "mapper" used by the encoder above).

    NOTE(review): obfuscated — `__init__` binds its module list to ``A__``
    while the forward pass iterates ``self.blocks`` and returns
    ``hidden_states``. Code left byte-identical; comments only.
    """
    def __init__( self , A__ ):
        super().__init__()
        # Roughly one mapper block per five hidden layers of the CLIP config.
        A__ : Optional[int] = (config.num_hidden_layers + 1) // 5
        A__ : List[Any] = config.hidden_size
        A__ : Optional[int] = 1
        A__ : Tuple = nn.ModuleList(
            [
                BasicTransformerBlock(A__ , A__ , A__ , activation_fn="""gelu""" , attention_bias=A__ )
                for _ in range(A__ )
            ] )
    def __A ( self , A__ ):
        # Sequentially apply each transformer block.
        for block in self.blocks:
            A__ : str = block(A__ )
        return hidden_states
| 700 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
# Collect all documented file paths and flag naming-convention violations.
# NOTE(review): variable names restored from the f-strings/joins below,
# which read `filepaths`, `upper_files`, etc., while the corrupted
# assignments bound everything to `A_` (leaving those names undefined).
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

# Files whose names contain uppercase characters.
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

# Files whose names contain spaces.
space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

# Files whose names contain hyphens.
hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

# Files that live at the repository root (no directory separator).
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

# Exit with a non-zero status equal to the number of offending files so CI fails.
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 64 | 0 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _a (__magic_name__ ):
    """Config tester hook: checks that a constructed Levit config exposes the
    attributes the model code relies on.

    NOTE(review): the undefined base ``__magic_name__`` is presumably the
    ConfigTester helper; ``self.config_class`` / ``self.inputs_dict`` /
    ``self.parent`` come from that base. Code left byte-identical.
    """
    def __A ( self ):
        # Build a config from the test inputs and assert required attributes.
        A__ : int = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(A__ , """hidden_sizes""" ) )
        self.parent.assertTrue(hasattr(A__ , """num_attention_heads""" ) )
class _a :
    """Builds Levit configs and dummy inputs for the model tests below.

    NOTE(review): the original ``__init__`` declared every parameter as
    ``A__`` (a SyntaxError) and every method as ``__A``; parameter names were
    restored from the assignment order and method names from their call sites
    in the test class further down (``prepare_config_and_inputs``,
    ``get_config``, ``create_and_check_model``,
    ``create_and_check_for_image_classification``,
    ``prepare_config_and_inputs_for_common``).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        # NOTE: mutable list defaults are shared across instances; kept as-is
        # for interface compatibility (test helpers never mutate them).
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        # Two down-sampling ("Subsample") stages between the three Levit blocks.
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a forward pass."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Instantiate a LevitConfig from this tester's hyper-parameters."""
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Run LevitModel and check the last-hidden-state shape."""
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        # Four stride-`stride` convolutions in the patch embedding shrink H/W.
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Run LevitForImageClassification with labels; check logits shape."""
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Variant returning (config, inputs_dict) for the shared model tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _a (__magic_name__ , __magic_name__ , unittest.TestCase ):
    """Model tests for Levit (LevitModel / LevitForImageClassification).

    NOTE(review): mechanically obfuscated — locals are bound to ``A__`` while
    later lines read the intended names (``config``, ``inputs_dict``,
    ``model`` ...), and the undefined bases ``__magic_name__`` are presumably
    ModelTesterMixin and PipelineTesterMixin (imported above). Likewise the
    class attributes below were presumably ``all_model_classes``,
    ``pipeline_model_mapping`` and the usual test feature flags. Code left
    byte-identical; comments only.
    """
    UpperCAmelCase__: Union[str, Any] = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    UpperCAmelCase__: Union[str, Any] = (
        {
            '''feature-extraction''': LevitModel,
            '''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    UpperCAmelCase__: List[str] = False
    UpperCAmelCase__: List[Any] = False
    UpperCAmelCase__: int = False
    UpperCAmelCase__: Optional[int] = False
    UpperCAmelCase__: Dict = False
    def __A ( self ):
        # setUp: build the model tester and the config tester.
        A__ : Dict = LevitModelTester(self )
        A__ : Optional[int] = ConfigTester(self , config_class=A__ , has_text_modality=A__ , hidden_size=37 )
    def __A ( self ):
        # Exercise the common config serialization round-trips.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def __A ( self ):
        return
    @unittest.skip(reason="""Levit does not use inputs_embeds""" )
    def __A ( self ):
        pass
    @unittest.skip(reason="""Levit does not support input and output embeddings""" )
    def __A ( self ):
        pass
    @unittest.skip(reason="""Levit does not output attentions""" )
    def __A ( self ):
        pass
    def __A ( self ):
        # Forward signature must start with `pixel_values`.
        A__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A__ : str = model_class(A__ )
            A__ : List[Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            A__ : str = [*signature.parameters.keys()]
            A__ : Optional[Any] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , A__ )
    def __A ( self ):
        # Hidden-states output: count and first-block shape.
        def check_hidden_states_output(A__ , A__ , A__ ):
            A__ : str = model_class(A__ )
            model.to(A__ )
            model.eval()
            with torch.no_grad():
                A__ : List[Any] = model(**self._prepare_for_class(A__ , A__ ) )
            A__ : List[Any] = outputs.hidden_states
            A__ : int = len(self.model_tester.depths ) + 1
            self.assertEqual(len(A__ ) , A__ )
            A__ : List[Any] = (self.model_tester.image_size, self.model_tester.image_size)
            A__ : List[str] = image_size[0], image_size[1]
            # Mirror the four stride reductions of the patch embedding.
            for _ in range(4 ):
                A__ : str = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
                A__ : List[str] = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ] , )
        A__ : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A__ : Union[str, Any] = True
            check_hidden_states_output(A__ , A__ , A__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            A__ : Dict = True
            check_hidden_states_output(A__ , A__ , A__ )
    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def __A ( self ):
        pass
    def __A ( self , A__ , A__ , A__=False ):
        # The teacher variant is inference-only: it takes no labels.
        A__ : Tuple = super()._prepare_for_class(A__ , A__ , return_labels=A__ )
        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def __A ( self ):
        A__ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A__ )
    def __A ( self ):
        A__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*A__ )
    def __A ( self ):
        # Training: every trainable class should backprop a loss.
        if not self.model_tester.is_training:
            return
        A__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        A__ : Dict = True
        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(A__ )
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            A__ : Dict = model_class(A__ )
            model.to(A__ )
            model.train()
            A__ : Union[str, Any] = self._prepare_for_class(A__ , A__ , return_labels=A__ )
            A__ : List[Any] = model(**A__ ).loss
            loss.backward()
    def __A ( self ):
        # Training with gradient checkpointing enabled.
        A__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        A__ : Dict = False
        A__ : Any = True
        for model_class in self.all_model_classes:
            if model_class in get_values(A__ ) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            A__ : List[Any] = model_class(A__ )
            model.gradient_checkpointing_enable()
            model.to(A__ )
            model.train()
            A__ : Optional[Any] = self._prepare_for_class(A__ , A__ , return_labels=A__ )
            A__ : Any = model(**A__ ).loss
            loss.backward()
    def __A ( self ):
        # Classification heads must accept all supported problem types
        # without triggering the broadcast warning (see issue #11780).
        A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        A__ : Union[str, Any] = [
            {"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
            {"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
            {"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(A__ ),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=F"""Testing {model_class} with {problem_type['title']}""" ):
                    A__ : Any = problem_type["""title"""]
                    A__ : Any = problem_type["""num_labels"""]
                    A__ : List[Any] = model_class(A__ )
                    model.to(A__ )
                    model.train()
                    A__ : Optional[Any] = self._prepare_for_class(A__ , A__ , return_labels=A__ )
                    if problem_type["num_labels"] > 1:
                        A__ : Any = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
                    A__ : Dict = inputs["""labels"""].to(problem_type["""dtype"""] )
                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=A__ ) as warning_list:
                        A__ : Dict = model(**A__ ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
                    loss.backward()
    @slow
    def __A ( self ):
        # Smoke-test loading a pretrained checkpoint.
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A__ : Union[str, Any] = LevitModel.from_pretrained(A__ )
            self.assertIsNotNone(A__ )
def prepare_img():
    """Load the COCO test image (two cats) used by the integration test below.

    Fix: the integration test calls `prepare_img()` but this was defined under the
    obfuscated name `UpperCamelCase`; the `-> Dict` annotation was also wrong (a
    PIL image is returned), so it is dropped.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class _a (unittest.TestCase ):
    '''simple docstring'''

    @cached_property
    def default_image_processor( self ):
        # Renamed from `__A`: the test below reads `self.default_image_processor`,
        # and with both members named `__A` the second definition shadowed this one.
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )

    @slow
    def __A ( self ):
        """Run LevitForImageClassificationWithTeacher on the test image and check the logits."""
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            torch_device )  # NOTE(review): device name inferred -- the original read a never-bound `A__` here
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([1.0_4_4_8, -0.3_7_4_5, -1.8_3_1_7] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 701 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def UpperCamelCase (*args , take_from=None , standard_warn=True , stacklevel=2 ):
    """Emit deprecation warnings for (attribute, version, message) tuples.

    Pops deprecated kwargs from `take_from` (a dict) or reads deprecated
    attributes from it, warns with `FutureWarning`, and returns the collected
    values (none / scalar / tuple). Raises TypeError on leftover unknown kwargs
    and ValueError when a deprecation is past its removal version.

    Fix: the original declared four parameters all named `lowercase_` (a
    SyntaxError) while the body read `take_from` and `standard_warn`; the
    TypeError message also lost the `{filename}` interpolation.
    """
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0] , tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        # A deprecation that should already have been removed is a programming error.
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                f"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
                f""" version {__version__} is >= {version_name}""" )
        warning = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = f"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
            warning = f"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
        elif deprecated_kwargs is None:
            warning = f"""`{attribute}` is deprecated and will be removed in version {version_name}."""
        if warning is not None:
            warning = warning + """ """ if standard_warn else """"""
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
        # Unknown leftovers mean the caller passed a kwarg we never deprecated.
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(f"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
| 64 | 0 |
# Public API of the download subpackage; `__all__` makes the export list explicit.
__all__ = [
    'DownloadConfig',
    'DownloadManager',
    'DownloadMode',
    'StreamingDownloadManager',
]

# Backward-compatible alias for the previous (generated) name of this list.
A_ : Any = __all__

from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 702 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple , flax_tensor ):
    """Rename one flax parameter key/tensor pair to its PyTorch equivalent.

    Fix: both parameters were named `lowercase_` (a SyntaxError) while the body
    read `flax_key_tuple`/`flax_tensor`; the function is also called by name as
    `rename_base_flax_keys` in `shard_on_the_fly`.
    """
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer: batched kernels stored (experts, in, out); torch wants (experts, out, in)
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
        flax_tensor = torch.permute(flax_tensor , (0, 2, 1) )
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ):
        # linear layer: transpose the 2-D kernel
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer , checkpoint_info , switch_checkpoint_path ):
    """Split a flattened checkpoint key into (real layer name, sub-key tuple, content).

    `content` is the kvstore path (prefixed with the checkpoint dir), the literal
    driver ``"file"``, or the raw metadata value.

    Fix: the three parameters were all named `lowercase_` (a SyntaxError); names
    restored to the ones the body reads, and the def renamed to match its call
    site in `shard_on_the_fly`.
    """
    if "metadata" in layer:
        split_layer = layer.split("""metadata""" )
        curr_real_layer_name = """""".join(split_layer[0] )[:-1]
        split_layer = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )]
    elif "kvstore" in layer:
        split_layer = layer.split("""kvstore""" )
        curr_real_layer_name = """""".join(split_layer[0] )[:-1]
        split_layer = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )]
    else:
        split_layer = layer.split("""/""" )
        curr_real_layer_name = """/""".join(split_layer[:-1] )
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
    elif "kvstore/driver" in layer:
        content = """file"""
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block , save_path ):
    """Rename the keys of `current_block` to the transformers scheme and torch-save it.

    Fix: both parameters were named `lowercase_` (a SyntaxError), and the key
    rewrite inside the loop was lost in the obfuscated copy.
    """
    current_block = rename_keys(current_block )
    new_current_block = {}
    for k, v in current_block.items():
        # Flax keys use "/" separators; PyTorch state dicts use ".".
        # NOTE(review): this rewrite was dropped during obfuscation -- confirm against the upstream script.
        new_current_block[k.replace("""/""" , """.""" )] = v
    current_block = new_current_block
    torch.save(current_block , save_path )
def shard_on_the_fly(switch_checkpoint_path , dump_path , max_shard_size , dtype , weights_name = WEIGHTS_NAME ):
    """Stream a t5x Switch checkpoint from tensorstore and write sharded torch weights.

    Returns `(metadata, index)` (index is None when a single shard suffices).

    Fix: all five parameters were named `lowercase_` (a SyntaxError) and most
    locals were collapsed into `A__`; names restored from the surviving reads
    (`switch_checkpoint_path`, `checkpoint_info`, `all_layers`, `current_block`,
    ...), and the def renamed to match the `shard_on_the_fly(...)` call in
    `__main__`.
    """
    max_shard_size = convert_file_size_to_int(max_shard_size )
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
        checkpoint_info = flatten_dict(checkpoint_info , sep="""/""" )
    # Group tensorstore specs / metadata by their real layer name.
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer , checkpoint_info , switch_checkpoint_path )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
        raw_weights = torch.tensor(raw_weights )
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
        # use the renaming pattern from the small conversion scripts
        key_tuple, raw_weights = rename_base_flax_keys(tuple(key.split("""/""" ) ) , raw_weights )
        key = """/""".join(key_tuple )
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path , weights_name.replace(""".bin""" , f"""-{len(sharded_state_dicts )+1:05d}-of-???.bin""" ) )
            rename_and_save_block(current_block , save_path )
            sharded_state_dicts.append(current_block.keys() )
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch , dtype ) )
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace(""".bin""" , f"""-{len(sharded_state_dicts )+1:05d}-of-???.bin""" ) )
    rename_and_save_block(current_block , save_path )
    sharded_state_dicts.append(current_block.keys() )
    # If we only have one shard, we return it
    if len(sharded_state_dicts ) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace(
            """.bin""" , f"""-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin""" )  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(dump_path , weights_name.replace(""".bin""" , f"""-{idx+1:05d}-of-???.bin""" ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"""total_size""": total_size}
    index = {"""metadata""": metadata, """weight_map""": weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , """w""" , encoding="""utf-8""" ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + """\n"""
        f.write(content )
    return metadata, index
if __name__ == "__main__":
    # Fix: the parser/args objects were bound to the throwaway name `A_` while the
    # following lines read `parser` and `args`; the parse_args attribute was also
    # misspelled `switch_tax_checkpoint_path` (the flag defines `switch_t5x_...`).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--switch_t5x_checkpoint_path',
        default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
        type=str,
        required=False,
        help='Path to a directory containing a folder per layer. Follows the original Google format.',
    )
    parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
    parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
        type=str,
        required=False,
        help='Path to the output pytorch model.',
    )
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def UpperCamelCase ():
    """Manual sanity check: round-trip a converted switch-base-8 model through generate.

    Fix: `TaTokenizer` does not exist in transformers (digit-mangled `T5Tokenizer`),
    and the tokenizer call read a never-bound `lowercase_` instead of the prompt
    text; the `-> int` annotation was wrong (nothing is returned).
    """
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
    config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        """/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
    tokenizer = T5Tokenizer.from_pretrained("""t5-small""" )
    text = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
    input_ids = tokenizer(text , return_tensors="""pt""" ).input_ids
    out = model.generate(input_ids , decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
| 64 | 0 |
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
def get_module_path(test_file ):
    """Turn ``tests/models/.../test_modeling_x.py`` into a dotted import path.

    Raises ValueError when the path does not follow the expected layout.
    Fix: defined under the obfuscated name `UpperCamelCase` while `get_test_module`
    calls it as `get_module_path` (and the serial `UpperCamelCase` defs shadowed
    each other).
    """
    components = test_file.split(os.path.sep )
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            """`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got """
            f"""{test_file} instead.""" )
    test_fn = components[-1]
    if not test_fn.endswith("""py""" ):
        raise ValueError(f"""`test_file` should be a python file. Got {test_fn} instead.""" )
    if not test_fn.startswith("""test_modeling_""" ):
        raise ValueError(
            f"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" )
    components = components[:-1] + [test_fn.replace(""".py""" , """""" )]
    test_module_path = """.""".join(components )
    return test_module_path
def get_test_module(test_file ):
    """Import and return the test module corresponding to `test_file`.

    Fix: the obfuscated copy computed the dotted module path but then imported the
    raw file path instead; also renamed to match the `get_test_module(...)` call
    sites below.
    """
    test_module_path = get_module_path(test_file )
    test_module = importlib.import_module(test_module_path )
    return test_module
def get_tester_classes(test_file ):
    """Collect all `*ModelTester` classes defined in `test_file`'s module.

    Fix: the sort key was `lambda lowercase_: x.__name__`, referencing an
    undefined `x` (NameError on any non-empty result); also renamed from the
    shadowed `UpperCamelCase`.
    """
    tester_classes = []
    test_module = get_test_module(test_file )
    for attr in dir(test_module ):
        if attr.endswith("""ModelTester""" ):
            tester_classes.append(getattr(test_module , attr ) )
    # sort with class names
    return sorted(tester_classes , key=lambda cls : cls.__name__ )
def get_test_classes(test_file ):
    """Collect all test classes (those exposing a non-empty `all_model_classes`).

    Fix: the obfuscated copy measured `len(...)` of the file-path argument instead
    of the collected model classes, appended the wrong object, and used a sort
    lambda referencing an undefined `x`.
    """
    test_classes = []
    test_module = get_test_module(test_file )
    for attr in dir(test_module ):
        attr_value = getattr(test_module , attr )
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value , """all_model_classes""" , [] )
        if len(model_classes ) > 0:
            test_classes.append(attr_value )
    # sort with class names
    return sorted(test_classes , key=lambda cls : cls.__name__ )
def get_model_classes(test_file ):
    """Union of `all_model_classes` across all test classes in `test_file`'s module.

    Fix: sort lambda referenced an undefined `x`; renamed to match the
    `get_model_classes(...)` call sites below.
    """
    test_classes = get_test_classes(test_file )
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes )
    # sort with class names
    return sorted(model_classes , key=lambda cls : cls.__name__ )
def get_model_tester_from_test_class(test_class ):
    """Return the tester class used by `test_class`, or None.

    Fix: `model_tester` was read by the return statement but every assignment went
    to a throwaway `A__` name (guaranteed NameError); renamed to match the call
    sites below.
    """
    test = test_class()
    if hasattr(test , """setUp""" ):
        test.setUp()
    model_tester = None
    if hasattr(test , """model_tester""" ):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model(test_file , model_class ):
    """Test classes in `test_file` that exercise `model_class`.

    Fix: the two parameters were both named `lowercase_` (a SyntaxError) and the
    sort lambda referenced an undefined `x`.
    """
    test_classes = get_test_classes(test_file )
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class )
    # sort with class names
    return sorted(target_test_classes , key=lambda cls : cls.__name__ )
def get_tester_classes_for_model(test_file , model_class ):
    """Tester classes used by the test classes that exercise `model_class`.

    Fix: duplicate `lowercase_` parameters (SyntaxError) and the broken sort
    lambda, as in the sibling helpers.
    """
    test_classes = get_test_classes_for_model(test_file , model_class )
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class )
        if tester_class is not None:
            tester_classes.append(tester_class )
    # sort with class names
    return sorted(tester_classes , key=lambda cls : cls.__name__ )
def get_test_to_tester_mapping(test_file ):
    """Map each test class in `test_file` to its tester class.

    Fix: renamed from `UpperCamelCase`, under which every helper in this module
    shadowed the previous one, leaving only the last reachable.
    """
    test_classes = get_test_classes(test_file )
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class ) for test_class in test_classes}
    return test_tester_mapping
def get_model_to_test_mapping(test_file ):
    """Map each model class in `test_file` to the test classes exercising it.

    Fix: renamed from the shadowed `UpperCamelCase` name.
    """
    model_classes = get_model_classes(test_file )
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file , model_class ) for model_class in model_classes
    }
    return model_test_mapping
def get_model_to_tester_mapping(test_file ):
    """Map each model class in `test_file` to the tester classes exercising it.

    Fix: renamed from the shadowed `UpperCamelCase` name.
    """
    model_classes = get_model_classes(test_file )
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file , model_class ) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json(o ):
    """Recursively replace classes in `o` with their names so it is JSON-serializable.

    Fix: the obfuscated copy tested `isinstance(o, o)` (self-check, TypeError for
    non-types) where str/type checks were intended — the `o.__name__` branch and
    pass-through fallback pin down the intent — and the body recursed via
    `to_json`, so the def must carry that name.
    """
    if isinstance(o , str ):
        return o
    elif isinstance(o , type ):
        return o.__name__
    elif isinstance(o , (list, tuple) ):
        return [to_json(x ) for x in o]
    elif isinstance(o , dict ):
        return {to_json(k ): to_json(v ) for k, v in o.items()}
    else:
        return o
| 703 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available

# Lazily-populated import structure consumed by `_LazyModule` below.
# Fix: the obfuscated copy bound this dict and the tokenizer list to throwaway
# `A_` names, so the `_import_structure` referenced at the bottom was undefined,
# and the `_LazyModule` instance was never installed into `sys.modules`.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_bartpho'] = ['BartphoTokenizer']

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _a (__magic_name__ ):
    """Tool that zero-shot-classifies English text against caller-provided labels
    using an MNLI entailment model.

    Fixes: the class attributes were all named `UpperCAmelCase__` (each shadowing
    the previous, leaving only one), `encode` declared two parameters with the
    same name (a SyntaxError), and `self.entailment_id` / `self._labels` were read
    but never assigned. Attribute and method names restored to the PipelineTool
    contract implied by `super().setup()` and the `self.pre_processor` usage —
    NOTE(review): confirm against the base-class API.
    """

    default_checkpoint = '''facebook/bart-large-mnli'''
    description = (
        '''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
        '''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
        '''It returns the most likely label in the list of provided `labels` for the input text.'''
    )
    name = '''text_classifier'''
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ['''text''', ['''text''']]
    outputs = ['''text''']

    def setup( self ):
        """Resolve the entailment label index from the model config."""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("""entail""" ):
                self.entailment_id = int(idx )
        if self.entailment_id == -1:
            raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""" )

    def encode( self , text , labels ):
        """Build one (premise, hypothesis) pair per candidate label."""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ) , [F"""This example is {label}""" for label in labels] , return_tensors="""pt""" , padding="""max_length""" , )

    def decode( self , outputs ):
        """Pick the label whose hypothesis has the highest entailment score (MNLI column 2)."""
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
| 704 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
# Checkpoint-name -> download URL mapping for the official OpenAI Whisper models.
# Fix: bound to the throwaway name `A_`, while the conversion function reads `_MODELS`.
_MODELS = {
    'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
    'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
    'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
    'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
    'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
    'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
    'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
    'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
    'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
    'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def remove_ignore_keys_(state_dict ):
    """Drop top-level container entries that are not real weights (in place).

    Fix: the body read `state_dict` while the parameter was obfuscated to
    `lowercase_`, and the pop default was the dict itself instead of None; the def
    is renamed to match the `remove_ignore_keys_(...)` call in the converter.
    """
    ignore_keys = ["""layers""", """blocks"""]
    for k in ignore_keys:
        state_dict.pop(k , None )
# OpenAI-checkpoint substring -> transformers state-dict substring rewrite table.
# Fix: bound to the throwaway name `A_`, while `rename_keys` reads `WHISPER_MAPPING`.
WHISPER_MAPPING = {
    'blocks': 'layers',
    'mlp.0': 'fc1',
    'mlp.2': 'fc2',
    'mlp_ln': 'final_layer_norm',
    '.attn.query': '.self_attn.q_proj',
    '.attn.key': '.self_attn.k_proj',
    '.attn.value': '.self_attn.v_proj',
    '.attn_ln': '.self_attn_layer_norm',
    '.attn.out': '.self_attn.out_proj',
    '.cross_attn.query': '.encoder_attn.q_proj',
    '.cross_attn.key': '.encoder_attn.k_proj',
    '.cross_attn.value': '.encoder_attn.v_proj',
    '.cross_attn_ln': '.encoder_attn_layer_norm',
    '.cross_attn.out': '.encoder_attn.out_proj',
    'decoder.ln.': 'decoder.layer_norm.',
    'encoder.ln.': 'encoder.layer_norm.',
    'token_embedding': 'embed_tokens',
    'encoder.positional_embedding': 'encoder.embed_positions.weight',
    'decoder.positional_embedding': 'decoder.embed_positions.weight',
    'ln_post': 'layer_norm',
}
def rename_keys(s_dict ):
    """Rewrite every key of `s_dict` (in place) using WHISPER_MAPPING; return it.

    Fix: the body read `s_dict` and `new_key` while the obfuscated copy bound
    neither (the parameter was `lowercase_` and assignments went to `A__`); the
    def is renamed to match the `rename_keys(...)` call in the converter.
    """
    keys = list(s_dict.keys() )
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k , v )
        print(f"""{key} -> {new_key}""" )
        s_dict[new_key] = s_dict.pop(key )
    return s_dict
def make_linear_from_emb(emb ):
    """Build a bias-free Linear layer sharing the embedding's weight matrix.

    Fix: the body read `emb` while the parameter was obfuscated to `lowercase_`
    (NameError), and the Linear dims/bias were fed that same unbound name; the
    def is renamed to match the `make_linear_from_emb(...)` call in the converter.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str , root: str = os.path.join(os.path.expanduser("""~""" ) , """.cache""" , """whisper""" ) ) -> bytes:
    """Download `url` into `root` and return the bytes, verifying the SHA256
    checksum embedded in the URL (second-to-last path component).

    Fixes: `hashlib.shaaaa` does not exist (digit-mangled `sha256`); both
    parameters were named `lowercase_` (a SyntaxError) while the body read `url`
    and `download_target`; and the in-file call passes a single argument, so
    `root` now has a default cache directory.
    """
    os.makedirs(root , exist_ok=True )
    filename = os.path.basename(url )
    expected_sha256 = url.split("""/""" )[-2]
    download_target = os.path.join(root , filename )
    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(f"""{download_target} exists and is not a regular file""" )
    if os.path.isfile(download_target ):
        model_bytes = open(download_target , """rb""" ).read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
    with urllib.request.urlopen(url ) as source, open(download_target , """wb""" ) as output:
        with tqdm(
            total=int(source.info().get("""Content-Length""" ) ) , ncols=80 , unit="""iB""" , unit_scale=True , unit_divisor=1024 ) as loop:
            while True:
                buffer = source.read(8192 )
                if not buffer:
                    break
                output.write(buffer )
                loop.update(len(buffer ) )
    model_bytes = open(download_target , """rb""" ).read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_sha256:
        raise RuntimeError(
            """Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.""" )
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path , pytorch_dump_folder_path ):
    """Convert an OpenAI Whisper checkpoint (name or .pt path) to a transformers
    WhisperForConditionalGeneration saved at `pytorch_dump_folder_path`.

    Fixes: duplicate `lowercase_` parameters (SyntaxError); collapsed locals
    (`original_checkpoint`, `dimensions`, `state_dict`, ... were read but never
    bound); `decoder_attention_heads` was fed `n_text_state` (the hidden size)
    instead of `n_text_head`; renamed to match the call in `__main__`.
    """
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path] )
    else:
        original_checkpoint = torch.load(checkpoint_path , map_location="""cpu""" )
    dimensions = original_checkpoint["""dims"""]
    state_dict = original_checkpoint["""model_state_dict"""]
    proj_out_weights = state_dict["""decoder.token_embedding.weight"""]
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    tie_embeds = True
    ffn_dim = state_dict["""decoder.layers.0.fc1.weight"""].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["""n_vocab"""] , encoder_ffn_dim=ffn_dim , decoder_ffn_dim=ffn_dim , num_mel_bins=dimensions["""n_mels"""] , d_model=dimensions["""n_audio_state"""] , max_target_positions=dimensions["""n_text_ctx"""] , encoder_layers=dimensions["""n_audio_layer"""] , encoder_attention_heads=dimensions["""n_audio_head"""] , decoder_layers=dimensions["""n_text_layer"""] , decoder_attention_heads=dimensions["""n_text_head"""] , max_source_positions=dimensions["""n_audio_ctx"""] , )
    model = WhisperForConditionalGeneration(config )
    missing, unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            """Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
            f""" but all the following weights are missing {missing}""" )
    if tie_embeds:
        # Sinusoidal positions are recomputed, so only the tied projection needs rebuilding.
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # Fix: `parser` and `args` were read below but the objects were bound to the
    # throwaway name `A_`; the checkpoint-path help string also had a typo.
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Path to the downloaded checkpoints')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 64 | 0 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _a (__magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Dict = LongformerTokenizer
UpperCAmelCase__: Optional[Any] = True
UpperCAmelCase__: Union[str, Any] = LongformerTokenizerFast
UpperCAmelCase__: Optional[Any] = True
def __A ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A__ : Optional[int] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
A__ : Optional[Any] = dict(zip(A__ , range(len(A__ ) ) ) )
A__ : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
A__ : str = {"""unk_token""": """<unk>"""}
A__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(A__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(A__ ) )
def __A ( self , **A__ ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **A__ )
def __A ( self , **A__ ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **A__ )
def __A ( self , A__ ):
A__ : Dict = """lower newer"""
A__ : Dict = """lower newer"""
return input_text, output_text
def __A ( self ):
A__ : List[Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
A__ : Union[str, Any] = """lower newer"""
A__ : Dict = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
A__ : List[Any] = tokenizer.tokenize(A__ ) # , add_prefix_space=True)
self.assertListEqual(A__ , A__ )
A__ : Dict = tokens + [tokenizer.unk_token]
A__ : List[str] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , A__ )
def __A ( self ):
A__ : Tuple = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=A__ ) , [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=A__ ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
def __A ( self ):
A__ : Tuple = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
A__ : Dict = tokenizer.encode("""sequence builders""" , add_special_tokens=A__ )
A__ : Optional[int] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A__ )
A__ : Any = tokenizer.encode(
"""sequence builders""" , add_special_tokens=A__ , add_prefix_space=A__ )
A__ : List[str] = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=A__ , add_prefix_space=A__ )
A__ : Tuple = tokenizer.build_inputs_with_special_tokens(A__ )
A__ : List[Any] = tokenizer.build_inputs_with_special_tokens(A__ , A__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
    def __A ( self ):
        # Space/prefix handling around special tokens (notably <mask>).
        # NOTE(review): this method reads `tokenizer`, `encoded` and `mask_loc`,
        # none of which are ever bound -- every assignment below goes to a
        # throwaway `A__` name, so the method raises NameError as written.
        # The intended bindings were lost during obfuscation; reconstruct from the
        # upstream roberta/longformer `test_space_encoding` before relying on it.
        A__ : Any = self.get_tokenizer()
        A__ : str = """Encode this sequence."""
        A__ : List[Any] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
        # Testing encoder arguments
        A__ : Optional[Any] = tokenizer.encode(A__ , add_special_tokens=A__ , add_prefix_space=A__ )
        A__ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(A__ , A__ )
        A__ : str = tokenizer.encode(A__ , add_special_tokens=A__ , add_prefix_space=A__ )
        A__ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(A__ , A__ )
        tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
        A__ : Optional[int] = tokenizer.encode(A__ , add_special_tokens=A__ )
        A__ : Any = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(A__ , A__ )
        # Testing spaces after special tokens
        A__ : Dict = """<mask>"""
        tokenizer.add_special_tokens(
            {"""mask_token""": AddedToken(A__ , lstrip=A__ , rstrip=A__ )} )  # mask token has a left space
        A__ : Dict = tokenizer.convert_tokens_to_ids(A__ )
        A__ : List[Any] = """Encode <mask> sequence"""
        A__ : Tuple = """Encode <mask>sequence"""
        A__ : Optional[int] = tokenizer.encode(A__ )
        A__ : Union[str, Any] = encoded.index(A__ )
        A__ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(A__ , A__ )
        A__ : Optional[int] = tokenizer.encode(A__ )
        A__ : Union[str, Any] = encoded.index(A__ )
        A__ : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(A__ , A__ )
    def __A ( self ):
        # Deliberate no-op override: disables the corresponding common test for
        # this tokenizer (the mixin's default implementation is not applicable).
        pass
def __A ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
A__ : List[Any] = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
A__ : int = self.tokenizer_class.from_pretrained(A__ , **A__ )
A__ : Optional[Any] = """A, <mask> AllenNLP sentence."""
A__ : Union[str, Any] = tokenizer_r.encode_plus(A__ , add_special_tokens=A__ , return_token_type_ids=A__ )
A__ : Dict = tokenizer_p.encode_plus(A__ , add_special_tokens=A__ , return_token_type_ids=A__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
A__ : Any = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
A__ : int = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
A__ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
A__ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def __A ( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
A__ : str = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=A__ , add_prefix_space=A__ , trim_offsets=A__ )
A__ : Dict = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
A__ : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , A__ )
self.assertEqual(post_processor_state["""add_prefix_space"""] , A__ )
self.assertEqual(post_processor_state["""trim_offsets"""] , A__ )
def __A ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
A__ : List[Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
A__ : int = F"""{text_of_1_token} {text_of_1_token}"""
A__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A__ , use_fast=A__ , add_prefix_space=A__ , trim_offsets=A__ )
A__ : Union[str, Any] = tokenizer_r(A__ , return_offsets_mapping=A__ , add_special_tokens=A__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A__ ) + 1, len(A__ ) + 1 + len(A__ )) , )
A__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A__ , use_fast=A__ , add_prefix_space=A__ , trim_offsets=A__ )
A__ : Union[str, Any] = tokenizer_r(A__ , return_offsets_mapping=A__ , add_special_tokens=A__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A__ ) + 1, len(A__ ) + 1 + len(A__ )) , )
A__ : List[str] = self.rust_tokenizer_class.from_pretrained(
A__ , use_fast=A__ , add_prefix_space=A__ , trim_offsets=A__ )
A__ : str = tokenizer_r(A__ , return_offsets_mapping=A__ , add_special_tokens=A__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A__ ), len(A__ ) + 1 + len(A__ )) , )
A__ : str = self.rust_tokenizer_class.from_pretrained(
A__ , use_fast=A__ , add_prefix_space=A__ , trim_offsets=A__ )
A__ : Tuple = tokenizer_r(A__ , return_offsets_mapping=A__ , add_special_tokens=A__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A__ ), len(A__ ) + 1 + len(A__ )) , )
A__ : Any = F""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
A__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A__ , use_fast=A__ , add_prefix_space=A__ , trim_offsets=A__ )
A__ : Optional[Any] = tokenizer_r(A__ , return_offsets_mapping=A__ , add_special_tokens=A__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A__ ) + 1, 1 + len(A__ ) + 1 + len(A__ )) , )
A__ : int = self.rust_tokenizer_class.from_pretrained(
A__ , use_fast=A__ , add_prefix_space=A__ , trim_offsets=A__ )
A__ : Any = tokenizer_r(A__ , return_offsets_mapping=A__ , add_special_tokens=A__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A__ ), 1 + len(A__ ) + 1 + len(A__ )) , )
A__ : int = self.rust_tokenizer_class.from_pretrained(
A__ , use_fast=A__ , add_prefix_space=A__ , trim_offsets=A__ )
A__ : List[str] = tokenizer_r(A__ , return_offsets_mapping=A__ , add_special_tokens=A__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A__ ), 1 + len(A__ ) + 1 + len(A__ )) , )
| 705 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Make torch/cudnn ops deterministic so the pipeline output slices below are reproducible.
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast tiny-model checks for `TextToVideoSDPipeline` (restores identifiers
    scrambled to `_a`/`__magic_name__`/`A__` in the original)."""

    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        """Build tiny randomly-initialized components so the pipeline runs fast on CPU."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs; MPS needs a CPU-seeded generator."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    """Slow integration tests against the real `damo-vilab` checkpoint (requires CUDA).
    Restores identifiers scrambled to `A__` in the original."""

    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
| 64 | 0 |
import math
class Graph:
    """All-pairs shortest paths via the Floyd–Warshall algorithm.

    Restores the class/method names used by the `__main__` driver below
    (the original had them scrambled to `_a`/`__A`, with duplicate `A__`
    parameters that are a SyntaxError).
    """

    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        # adjacency matrix for weight
        self.w = [[math.inf for j in range(0, n)] for i in range(0, n)]
        # dp[i][j] stores minimum distance from i to j
        self.dp = [[math.inf for j in range(0, n)] for i in range(0, n)]

    def add_edge(self, u, v, w):
        """Add a directed edge u -> v with weight w."""
        self.dp[u][v] = w

    def floyd_warshall(self):
        """Relax every pair through each intermediate vertex k — O(n^3)."""
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        """Return the computed minimum distance from u to v (inf if unreachable)."""
        return self.dp[u][v]
if __name__ == "__main__":
    # Build a small directed, weighted graph, run Floyd–Warshall, and query
    # a couple of shortest-path distances.  (The original assigned the graph
    # to a scrambled name `A_` while the calls below use `graph`.)
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
| 706 |
def UpperCamelCase(lowercase_: int) -> int:
    """Return the 1-based position of the most significant set bit of a
    non-negative integer (i.e. its bit length); 0 for input 0.

    Fixes two defects in the original: `isinstance(x, x)` passed the value as
    the type argument, and the loop read an undefined name `number`.

    Raises:
        TypeError: if the input is not an ``int``.
    """
    if not isinstance(lowercase_, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    number = lowercase_
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 64 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    """Holds the image-processor kwargs shared by the GLPN processor tests.

    The class name is pinned by the `setUp` call in the test class below; the
    original header had duplicate `A__` parameters (a SyntaxError) and never
    assigned the attributes that `prepare_image_processor_dict` reads.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        """Kwargs for constructing the `GLPNImageProcessor` under test."""
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests `GLPNImageProcessor` on PIL, numpy and torch inputs (restores names
    scrambled to `_a`/`__magic_name__`/`__A`/`A__`)."""

    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
| 707 |
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(
    img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int
) -> np.ndarray:
    """Warp `img` with the affine transform mapping triangle `pt1` onto `pt2`,
    producing a `rows` x `cols` output.

    The name is pinned by the `__main__` driver below; the original header
    repeated the parameter name `lowercase_` five times (a SyntaxError) and
    the body referenced undefined `rows`/`cols`.
    """
    matrix = cva.getAffineTransform(pt1, pt2)
    return cva.warpAffine(img, matrix, (rows, cols))
if __name__ == "__main__":
    # read original image
    image = cva.imread(
        str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
    )
    # turn image in gray scale value
    gray_img = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    # NOTE(review): the original had all four arrays bound to one scrambled
    # name and `np.floataa` (non-existent dtype); restored to np.float32.
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
    for i, img in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(img, 'gray')
        plt.title(titles[i])
        plt.axis('off')
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 64 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    """Holds the image-processor kwargs shared by the PoolFormer processor tests.

    Name pinned by `setUp` in the test class below; the original header had
    duplicate `A__` parameters (a SyntaxError) and never assigned the
    attributes read by `prepare_image_processor_dict`.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        # Fall back to the default crop geometry when not given explicitly.
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Kwargs for constructing the `PoolFormerImageProcessor` under test."""
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests `PoolFormerImageProcessor` on PIL, numpy and torch inputs (restores
    names scrambled to `_a`/`__magic_name__`/`__A`/`A__`)."""

    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 708 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    """Smoke tests: `PyTorchBenchmark` must produce non-empty time/memory results
    for tiny checkpoints under various argument combinations.  Restores names
    scrambled to `_a`/`__A`/`A__` (e.g. `MODEL_ID`, `benchmark`, `results`)."""

    def check_results_dict_not_empty(self, results):
        # Every (batch_size, sequence_length) pair that was benchmarked must
        # have produced a non-None result.
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            torchscript=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            fp16=True,  # NOTE(review): source had scrambled `fpaa`; real kwarg is `fp16`
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            fp16=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 64 | 0 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
# Credentials / endpoints for the hub CI environment.  The original bound all
# of these to a single scrambled name `A_`, while the fixtures below reference
# the `CI_HUB_*` names.
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
# NOTE(review): the last template field read "(unknown)" in the source — an
# extraction artifact; huggingface_hub's URL template expects `{filename}`.
CI_HFH_HUGGINGFACE_CO_URL = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"

CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    """Point huggingface_hub file downloads at the CI hub URL template.
    (Original parameter was scrambled while the body used `monkeypatch`.)"""
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL
    )
@pytest.fixture
def UpperCamelCase (lowercase_: Union[str, Any] ) -> List[str]:
    """Redirect the datasets library's hub endpoint and dataset-URL config to
    the CI hub.

    NOTE(review): `monkeypatch` is unbound here - presumably the parameter
    should be pytest's `monkeypatch` fixture, and the two setattr calls should
    receive two different constants; confirm.
    """
    monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , lowercase_ )
    monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , lowercase_ )
@pytest.fixture
def UpperCamelCase (lowercase_: Dict ) -> List[str]:
    """Make HfFolder read its token from the CI token path.

    NOTE(review): `monkeypatch` is unbound here - presumably the parameter
    should be pytest's `monkeypatch` fixture; confirm.
    """
    monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , lowercase_ )
@pytest.fixture
def UpperCamelCase (lowercase_: Dict , lowercase_: str ) -> Tuple:
    """Save the CI hub token for the duration of a test, then delete it.

    NOTE(review): the signature declares two parameters with the same name
    (a SyntaxError in Python) - originally this presumably took a dependency
    fixture plus the token value; confirm.
    """
    HfFolder.save_token(lowercase_ )
    yield
    HfFolder.delete_token()
@pytest.fixture(scope="""session""" )
def UpperCamelCase () -> str:
    """Session-wide HfApi client for the CI hub.

    NOTE(review): `lowercase_` is unbound in this zero-argument fixture -
    the endpoint was presumably the CI hub endpoint constant; confirm.
    """
    return HfApi(endpoint=lowercase_ )
@pytest.fixture(scope="""session""" )
def UpperCamelCase (lowercase_: HfApi ) -> Optional[int]:
    """Install the CI user token for the whole session and restore whatever
    token was saved before.

    NOTE(review): the previously saved token is bound to `A__` but read back
    as `previous_token`, and both `save_token` calls receive the same
    argument - the pre/post values were presumably distinct; confirm.
    """
    A__ : Optional[int] = HfFolder.get_token()
    HfFolder.save_token(lowercase_ )
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(lowercase_ )
@pytest.fixture
def UpperCamelCase (lowercase_: Any ) -> int:
    """Return a callable that deletes a dataset repo from the CI hub.

    NOTE(review): `hf_api` inside `_cleanup_repo` is unbound - the fixture
    argument was presumably the HfApi client, and the repo id and token were
    presumably distinct arguments; confirm.
    """
    def _cleanup_repo(lowercase_: List[str] ):
        hf_api.delete_repo(lowercase_ , token=lowercase_ , repo_type="""dataset""" )
    return _cleanup_repo
@pytest.fixture
def UpperCamelCase (lowercase_: str ) -> Dict:
    """Context manager factory: yields a repo id and cleans the repo up on exit.

    NOTE(review): `repo_id` and `cleanup_repo` are unbound in the inner
    context manager - originally it presumably yielded its own argument and
    called the cleanup fixture received as the outer argument; confirm.
    """
    @contextmanager
    def _temporary_repo(lowercase_: Tuple ):
        try:
            yield repo_id
        finally:
            cleanup_repo(lowercase_ )
    return _temporary_repo
@pytest.fixture(scope="""session""" )
def UpperCamelCase (lowercase_: HfApi , lowercase_: Optional[Any] , lowercase_: List[str] ) -> Optional[int]:
    """Create a private CI dataset repo containing one text file, yield its
    id, then best-effort delete the repo.

    NOTE(review): the duplicate `lowercase_` parameters are a SyntaxError,
    and `hf_api`/`repo_name`/`repo_id` plus the token and file-path arguments
    are conflated into shared names; restore from the original before use.
    """
    A__ : Dict = f"""repo_txt_data-{int(time.time() * 10E3 )}"""
    A__ : Optional[int] = f"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(lowercase_ , token=lowercase_ , repo_type="""dataset""" , private=lowercase_ )
    hf_api.upload_file(
        token=lowercase_ , path_or_fileobj=str(lowercase_ ) , path_in_repo="""data/text_data.txt""" , repo_id=lowercase_ , repo_type="""dataset""" , )
    yield repo_id
    try:
        hf_api.delete_repo(lowercase_ , token=lowercase_ , repo_type="""dataset""" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: Union[str, Any] , lowercase_: int ) -> int:
    """Per-test view of the session-scoped private text-data repo.

    NOTE(review): the duplicate parameter names are a SyntaxError and the
    returned name is unbound - originally this just forwarded the session
    fixture; confirm.
    """
    return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="""session""" )
def UpperCamelCase (lowercase_: HfApi , lowercase_: List[str] , lowercase_: int ) -> int:
    """Create a private CI dataset repo containing a zipped text archive,
    yield its id, then best-effort delete the repo.

    NOTE(review): duplicate `lowercase_` parameters are a SyntaxError and
    `hf_api`/`repo_name`/`repo_id` plus the token and path arguments are
    conflated into shared names; restore from the original before use.
    """
    A__ : Optional[int] = f"""repo_zipped_txt_data-{int(time.time() * 10E3 )}"""
    A__ : str = f"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(lowercase_ , token=lowercase_ , repo_type="""dataset""" , private=lowercase_ )
    hf_api.upload_file(
        token=lowercase_ , path_or_fileobj=str(lowercase_ ) , path_in_repo="""data.zip""" , repo_id=lowercase_ , repo_type="""dataset""" , )
    yield repo_id
    try:
        hf_api.delete_repo(lowercase_ , token=lowercase_ , repo_type="""dataset""" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def UpperCamelCase (lowercase_: Optional[Any] , lowercase_: List[Any] , lowercase_: Dict ) -> List[Any]:
    """Per-test view of the session-scoped private zipped-text repo.

    NOTE(review): duplicate parameter names are a SyntaxError and the
    returned name is unbound - originally this just forwarded the session
    fixture; confirm.
    """
    return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="""session""" )
def UpperCamelCase (lowercase_: HfApi , lowercase_: List[Any] , lowercase_: List[str] ) -> int:
    """Create a private CI dataset repo containing a zipped image archive,
    yield its id, then best-effort delete the repo.

    NOTE(review): duplicate `lowercase_` parameters are a SyntaxError and
    `hf_api`/`repo_name`/`repo_id` plus the token and path arguments are
    conflated into shared names; restore from the original before use.
    """
    A__ : Dict = f"""repo_zipped_img_data-{int(time.time() * 10E3 )}"""
    A__ : Tuple = f"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(lowercase_ , token=lowercase_ , repo_type="""dataset""" , private=lowercase_ )
    hf_api.upload_file(
        token=lowercase_ , path_or_fileobj=str(lowercase_ ) , path_in_repo="""data.zip""" , repo_id=lowercase_ , repo_type="""dataset""" , )
    yield repo_id
    try:
        hf_api.delete_repo(lowercase_ , token=lowercase_ , repo_type="""dataset""" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def UpperCamelCase (lowercase_: List[str] , lowercase_: List[str] , lowercase_: Any ) -> Optional[int]:
    """Per-test view of the session-scoped private zipped-image repo.

    NOTE(review): duplicate parameter names are a SyntaxError and the
    returned name is unbound - originally this just forwarded the session
    fixture; confirm.
    """
    return hf_private_dataset_repo_zipped_img_data_
| 709 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
A_ : Optional[int] = abspath(join(dirname(__file__), 'src'))
# NOTE(review): the line below reads `git_repo_path`, but the path above was
# bound to `A_` - presumably these should be the same name; confirm.
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def UpperCamelCase(config):
    """Register transformers' custom pytest markers on the pytest config.

    This is the body of pytest's `pytest_configure` hook. Fixes applied:
    the parameter was renamed from the unused `lowercase_` to `config`, the
    name the body actually reads (pytest also passes it by keyword as
    `config`), and the old `lowercase_: List[str] -> Any` annotations were
    dropped because `List`/`Any` are never imported in this file and would
    raise a NameError when the `def` statement executes.
    """
    config.addinivalue_line(
        """markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" )
    config.addinivalue_line(
        """markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" )
    config.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" )
    config.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" )
    config.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" )
    config.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" )
def UpperCamelCase(lowercase_):
    """Forward pytest's add-option phase to the shared transformers helper.

    Fix: the old `lowercase_: Optional[int] -> Optional[Any]` annotations
    were dropped - `Optional`/`Any` are never imported in this file, so the
    annotations raised a NameError as soon as the `def` statement executed.
    """
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(lowercase_)
def UpperCamelCase(terminalreporter):
    """Emit transformers' test report files when --make-reports is set.

    This is the body of pytest's `pytest_terminal_summary` hook. Fixes: the
    parameter was renamed from `lowercase_` to `terminalreporter` (the name
    the body reads), the option value is bound to `make_reports` (the name
    the `if` below tests), and it is passed as the report `id` instead of
    the reporter object. The never-imported typing annotations were dropped.
    """
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("""--make-reports""")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def UpperCamelCase(session, exitstatus):
    """Map pytest exit code 5 (no tests collected) to success so CI passes.

    This is the body of pytest's `pytest_sessionfinish` hook. Fixes: the old
    signature declared two parameters with the same name (a SyntaxError) and
    the assignment target was lost; restored the standard
    `(session, exitstatus)` shape and the `session.exitstatus = 0` write.
    """
    # If no tests are collected, pytest exists with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
A_ : Tuple = doctest.register_optionflag('IGNORE_RESULT')
# Keep a handle on the stock checker so the custom checker can delegate to it.
# NOTE(review): both values are bound to `A_`, while the class below reads
# `IGNORE_RESULT` and `OutputChecker` - names lost in obfuscation; confirm.
A_ : Dict = doctest.OutputChecker
class _a (__magic_name__ ):
    """A doctest OutputChecker that honours the custom IGNORE_RESULT flag:
    when the flag is set, any actual output is accepted unconditionally.

    Fix: `check_output` declared three parameters all named `A__` (a
    SyntaxError); the standard doctest signature `(want, got, optionflags)`
    is restored, which also rebinds the `optionflags` name the body reads.
    NOTE(review): `IGNORE_RESULT` and `OutputChecker` are bound to `A_` at
    module level, not under these names - confirm the module-level bindings.
    """
    def __A(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)
# Install the doctest customizations.
# NOTE(review): `CustomOutputChecker` is not defined under that name above
# (the checker class is `_a`), and all three bindings target `A_` - originally
# these patched doctest.OutputChecker and exposed the Hf doctest helpers.
A_ : str = CustomOutputChecker
A_ : Dict = HfDoctestModule
A_ : Optional[int] = HfDocTestParser
| 64 | 0 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def UpperCamelCase (lowercase_: int = 3 ) -> qiskit.result.counts.Counts:
    """Build a quantum-Fourier-transform circuit over the given number of
    qubits and return measurement counts from a 10000-shot qasm simulation.

    NOTE(review): obfuscation damaged this body - the first `isinstance`
    check compares the argument against itself instead of a type (raises
    TypeError for any input), and `number_of_qubits`, `counter`,
    `quantum_circuit` and `job` are read but bound under other names.
    Restore the original bindings before running.
    """
    if isinstance(lowercase_ , lowercase_ ):
        raise TypeError("""number of qubits must be a integer.""" )
    if number_of_qubits <= 0:
        raise ValueError("""number of qubits must be > 0.""" )
    if math.floor(lowercase_ ) != number_of_qubits:
        raise ValueError("""number of qubits must be exact integer.""" )
    if number_of_qubits > 10:
        raise ValueError("""number of qubits too large to simulate(>10).""" )
    # Quantum/classical registers and the circuit under construction.
    A__ : Any = QuantumRegister(lowercase_ , """qr""" )
    A__ : Any = ClassicalRegister(lowercase_ , """cr""" )
    A__ : Optional[Any] = QuantumCircuit(lowercase_ , lowercase_ )
    A__ : Optional[Any] = number_of_qubits
    # Hadamard each qubit, then controlled-phase rotations between pairs.
    for i in range(lowercase_ ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(lowercase_ ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , lowercase_ , lowercase_ )
    # Reverse the qubit order with swaps - the QFT emits bits reversed.
    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(lowercase_ , number_of_qubits - k - 1 )
    # measure all the qubits
    quantum_circuit.measure(lowercase_ , lowercase_ )
    # simulate with 10000 shots
    A__ : int = Aer.get_backend("""qasm_simulator""" )
    A__ : List[Any] = execute(lowercase_ , lowercase_ , shots=10000 )
    return job.result().get_counts(lowercase_ )
if __name__ == "__main__":
    # Demo: print the measurement counts for a 3-qubit QFT.
    # NOTE(review): `quantum_fourier_transform` is not defined under that
    # name above (the function is `UpperCamelCase`); confirm.
    print(
        f'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
    )
| 710 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _a :
    """Builds a tiny Pegasus config plus dummy inputs for the TF model tests.

    NOTE(review): obfuscation collapsed distinct names - the three class
    attributes (originally presumably `config_cls`, `config_updates`,
    `hidden_act`) are all `UpperCAmelCase__`, and the `__init__` parameters
    and attributes are all `A__`, yet the methods read the original names
    (`self.batch_size`, `self.config_cls`, ...). Restore before executing.
    """
    UpperCAmelCase__: List[Any] = PegasusConfig
    UpperCAmelCase__: Optional[int] = {}
    UpperCAmelCase__: List[str] = '''gelu'''
    def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=False , A__=99 , A__=32 , A__=2 , A__=4 , A__=37 , A__=0.1 , A__=0.1 , A__=40 , A__=2 , A__=1 , A__=0 , ):
        # Cache the harness handle and every model hyper-parameter.
        # NOTE(review): duplicate `A__` parameters are a SyntaxError, and the
        # names on the right-hand sides below are unbound - confirm originals.
        A__ : Dict = parent
        A__ : Dict = batch_size
        A__ : Any = seq_length
        A__ : Optional[Any] = is_training
        A__ : int = use_labels
        A__ : Any = vocab_size
        A__ : Union[str, Any] = hidden_size
        A__ : Tuple = num_hidden_layers
        A__ : Tuple = num_attention_heads
        A__ : List[Any] = intermediate_size
        A__ : Union[str, Any] = hidden_dropout_prob
        A__ : Optional[Any] = attention_probs_dropout_prob
        A__ : List[Any] = max_position_embeddings
        A__ : Any = eos_token_id
        A__ : List[Any] = pad_token_id
        A__ : List[Any] = bos_token_id
    def __A ( self ):
        """Assemble (config, inputs_dict): random ids terminated by EOS plus
        a decoder input and the masks derived by prepare_pegasus_inputs_dict."""
        A__ : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        A__ : Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        A__ : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
        A__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        A__ : Tuple = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        A__ : str = prepare_pegasus_inputs_dict(A__ , A__ , A__ )
        return config, inputs_dict
    def __A ( self , A__ , A__ ):
        """Check that feeding cached past_key_values reproduces the logits of
        a full forward pass over the concatenated sequence."""
        A__ : int = TFPegasusModel(config=A__ ).get_decoder()
        A__ : List[Any] = inputs_dict["""input_ids"""]
        # Restrict to a single example and its masks.
        A__ : Any = input_ids[:1, :]
        A__ : Optional[Any] = inputs_dict["""attention_mask"""][:1, :]
        A__ : Optional[int] = inputs_dict["""head_mask"""]
        A__ : Any = 1
        # first forward pass
        A__ : Tuple = model(A__ , attention_mask=A__ , head_mask=A__ , use_cache=A__ )
        A__ , A__ : Dict = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        A__ : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
        A__ : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        A__ : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
        A__ : Tuple = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        A__ : Optional[Any] = model(A__ , attention_mask=A__ )[0]
        A__ : Any = model(A__ , attention_mask=A__ , past_key_values=A__ )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        A__ : int = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        A__ : Any = output_from_no_past[:, -3:, random_slice_idx]
        A__ : Tuple = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(A__ , A__ , rtol=1e-3 )
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the standard TF Pegasus forward kwargs, deriving any mask the
    caller did not supply.

    Fixes: the obfuscated signature declared eight parameters all named
    `lowercase_` (a SyntaxError) while the body read the real names, and the
    function was renamed to match its call site in the tester class above.
    NOTE(review): `tf.inta` is kept verbatim - presumably an obfuscated
    `tf.int8`/`tf.int32`; confirm against git history.
    """
    if attention_mask is None:
        # Attend to every non-pad token.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.inta)
    if decoder_attention_mask is None:
        # Always attend to the first (decoder-start) token, then mask pads.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.inta),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class _a (__magic_name__ , __magic_name__ , unittest.TestCase ):
    """Model-level suite wiring the TF Pegasus models into the common TF
    model/pipeline test mixins.

    NOTE(review): all class attributes are obfuscated to `UpperCAmelCase__`
    (originally the all_model_classes / pipeline mapping / flag attributes
    the mixins read), and the methods read `self.model_tester`,
    `self.config_tester` and `TFPegasusModelTester`, none of which are bound
    under those names here; restore before executing.
    """
    UpperCAmelCase__: List[Any] = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    UpperCAmelCase__: Tuple = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    UpperCAmelCase__: Tuple = (
        {
            '''conversational''': TFPegasusForConditionalGeneration,
            '''feature-extraction''': TFPegasusModel,
            '''summarization''': TFPegasusForConditionalGeneration,
            '''text2text-generation''': TFPegasusForConditionalGeneration,
            '''translation''': TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    UpperCAmelCase__: int = True
    UpperCAmelCase__: Union[str, Any] = False
    UpperCAmelCase__: List[str] = False
    def __A ( self ):
        # Build the shared model-tester and config-tester fixtures.
        A__ : Optional[Any] = TFPegasusModelTester(self )
        A__ : Tuple = ConfigTester(self , config_class=A__ )
    def __A ( self ):
        # Run the generic config sanity checks.
        self.config_tester.run_common_tests()
    def __A ( self ):
        # Exercise the decoder's past-key-values fast path.
        A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*A__ )
@require_sentencepiece
@require_tokenizers
@require_tf
class _a (unittest.TestCase ):
    """Slow integration test: summarize two news articles with
    google/pegasus-xsum and compare the generations against pinned strings.

    NOTE(review): the class attributes (originally presumably `src_text`,
    `expected_text`, `model_name`) are all obfuscated to `UpperCAmelCase__`,
    and every method is named `__A`, while the bodies read the original
    attribute/method names (`self.src_text`, `self.translate_src_text`,
    `self._assert_generated_batch_equal_expected`, ...); restore first.
    """
    UpperCAmelCase__: Optional[int] = [
        ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
        ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
    ]
    UpperCAmelCase__: Any = [
        '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
        ''' reduce the risk of wildfires.''',
        '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
    ]  # differs slightly from pytorch, likely due to numerical differences in linear layers
    UpperCAmelCase__: List[str] = '''google/pegasus-xsum'''
    @cached_property
    def __A ( self ):
        """Tokenizer for the pinned checkpoint."""
        return AutoTokenizer.from_pretrained(self.model_name )
    @cached_property
    def __A ( self ):
        """TF seq2seq model for the pinned checkpoint."""
        A__ : int = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
    def __A ( self , **A__ ):
        """Generate summaries and assert exact equality with the references."""
        A__ : str = self.translate_src_text(**A__ )
        assert self.expected_text == generated_words
    def __A ( self , **A__ ):
        """Tokenize the sources, beam-search generate, and decode."""
        A__ : List[str] = self.tokenizer(self.src_text , **A__ , padding=A__ , return_tensors="""tf""" )
        A__ : Optional[int] = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=A__ , )
        A__ : Dict = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=A__ )
        return generated_words
    @slow
    def __A ( self ):
        self._assert_generated_batch_equal_expected()
| 64 | 0 |
def UpperCamelCase(arr: list) -> list:
    """Return all permutations of `arr` as tuples, via Heap's algorithm.

    Fixes: the in-place swaps had lost their assignment targets (the swap
    tuple was bound to a throwaway local, so every emitted "permutation" was
    identical), and the inner helper declared two parameters with the same
    name (a SyntaxError). Parameter names now match what the body reads.

    >>> sorted(UpperCamelCase([1, 2]))
    [(1, 2), (2, 1)]
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list) -> None:
        # Emit one permutation once the whole prefix is fixed.
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k even: swap the i-th element with the last
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:  # k odd: swap the first element with the last
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


# Backward-compatible alias: the __main__ block below calls `heaps`.
heaps = UpperCamelCase
if __name__ == "__main__":
    # Interactive demo: read comma-separated integers, print all permutations.
    # NOTE(review): `user_input` and `arr` are read below, but the inputs were
    # bound to `A_` - names lost in obfuscation; confirm.
    A_ : int = input('Enter numbers separated by a comma:\n').strip()
    A_ : Any = [int(item) for item in user_input.split(',')]
    print(heaps(arr))
class _a :
'''simple docstring'''
def __init__( self ):
A__ : str = """"""
A__ : Any = """"""
A__ : List[Any] = []
def __A ( self , A__ , A__ ):
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
A__ : Optional[Any] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
A__ : Union[str, Any] = self.__min_dist_top_down_dp(A__ , n - 1 )
A__ : Union[str, Any] = self.__min_dist_top_down_dp(m - 1 , A__ )
A__ : Union[str, Any] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
A__ : List[Any] = 1 + min(A__ , A__ , A__ )
return self.dp[m][n]
def __A ( self , A__ , A__ ):
A__ : Tuple = worda
A__ : Dict = worda
A__ : Optional[Any] = [[-1 for _ in range(len(A__ ) )] for _ in range(len(A__ ) )]
return self.__min_dist_top_down_dp(len(A__ ) - 1 , len(A__ ) - 1 )
def __A ( self , A__ , A__ ):
A__ : Optional[Any] = worda
A__ : Dict = worda
A__ : Union[str, Any] = len(A__ )
A__ : List[str] = len(A__ )
A__ : int = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
A__ : Tuple = j
elif j == 0: # second string is empty
A__ : Dict = i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
A__ : str = self.dp[i - 1][j - 1]
else:
A__ : Union[str, Any] = self.dp[i][j - 1]
A__ : str = self.dp[i - 1][j]
A__ : Union[str, Any] = self.dp[i - 1][j - 1]
A__ : Tuple = 1 + min(A__ , A__ , A__ )
return self.dp[m][n]
if __name__ == "__main__":
    # Interactive demo: read two strings and print their edit distance
    # computed both top-down and bottom-up.
    # NOTE(review): the solver and the two inputs are bound to `A_` but read
    # back as `solver`/`Sa`, and `EditDistance` is not defined under that
    # name above (the class is `_a`) - names lost in obfuscation; confirm.
    A_ : Union[str, Any] = EditDistance()
    print('****************** Testing Edit Distance DP Algorithm ******************')
    print()
    A_ : int = input('Enter the first string: ').strip()
    A_ : List[str] = input('Enter the second string: ').strip()
    print()
    print(f'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}''')
    print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}''')
    print()
    print('*************** End of Testing Edit Distance DP Algorithm ***************')
| 64 | 0 |
from __future__ import annotations
from collections import deque
class _a :
'''simple docstring'''
def __init__( self , A__ ):
A__ : list[dict] = []
self.adlist.append(
{"""value""": """""", """next_states""": [], """fail_state""": 0, """output""": []} )
for keyword in keywords:
self.add_keyword(A__ )
self.set_fail_transitions()
def __A ( self , A__ , A__ ):
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def __A ( self , A__ ):
A__ : Any = 0
for character in keyword:
A__ : int = self.find_next_state(A__ , A__ )
if next_state is None:
self.adlist.append(
{
"""value""": character,
"""next_states""": [],
"""fail_state""": 0,
"""output""": [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
A__ : List[Any] = len(self.adlist ) - 1
else:
A__ : Optional[int] = next_state
self.adlist[current_state]["output"].append(A__ )
def __A ( self ):
A__ : deque = deque()
for node in self.adlist[0]["next_states"]:
q.append(A__ )
A__ : Tuple = 0
while q:
A__ : List[Any] = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(A__ )
A__ : Dict = self.adlist[r]["""fail_state"""]
while (
self.find_next_state(A__ , self.adlist[child]["""value"""] ) is None
and state != 0
):
A__ : Tuple = self.adlist[state]["""fail_state"""]
A__ : Optional[int] = self.find_next_state(
A__ , self.adlist[child]["""value"""] )
if self.adlist[child]["fail_state"] is None:
A__ : int = 0
A__ : Union[str, Any] = (
self.adlist[child]["""output"""]
+ self.adlist[self.adlist[child]["""fail_state"""]]["""output"""]
)
def __A ( self , A__ ):
A__ : dict = {} # returns a dict with keywords and list of its occurrences
A__ : List[str] = 0
for i in range(len(A__ ) ):
while (
self.find_next_state(A__ , string[i] ) is None
and current_state != 0
):
A__ : int = self.adlist[current_state]["""fail_state"""]
A__ : Optional[int] = self.find_next_state(A__ , string[i] )
if next_state is None:
A__ : Optional[int] = 0
else:
A__ : List[Any] = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
A__ : List[Any] = []
result[key].append(i - len(A__ ) + 1 )
return result
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 712 |
def UpperCamelCase(first: int, second: int) -> int:
    """Add two non-negative integers using only bitwise operations.

    Classic carry-propagation loop: AND yields the carry bits, XOR adds
    without carry, and the carry is shifted left and fed back in until it
    is exhausted.

    Fixes: the obfuscated signature declared two parameters with the same
    name (a SyntaxError), and the carry/`second` rebindings were lost, so
    the loop never terminated.

    >>> UpperCamelCase(3, 5)
    8
    """
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first


# Backward-compatible alias: the __main__ block below calls `add`.
add = UpperCamelCase
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Interactive demo of the bitwise adder.
    # NOTE(review): `first`/`second` are read below but the parsed inputs are
    # bound to `A_` - names lost in obfuscation; confirm.
    A_ : Optional[Any] = int(input('Enter the first number: ').strip())
    A_ : List[str] = int(input('Enter the second number: ').strip())
    print(f'''{add(first, second) = }''')
| 64 | 0 |
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    """Surface area of a cube: 6 * a**2; raises ValueError for negative input.

    Name and parameter restored to what the body and error message use (the
    obfuscated signature bound `lowercase_` while the body read
    `side_length`, a NameError).
    """
    if side_length < 0:
        raise ValueError("""surface_area_cube() only accepts non-negative values""" )
    return 6 * side_length**2
def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    """Surface area of a cuboid: 2(lb + bh + lh); ValueError on negatives.

    The obfuscated signature declared three identical parameter names (a
    SyntaxError) while the body read `length`/`breadth`/`height`; restored.
    """
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("""surface_area_cuboid() only accepts non-negative values""" )
    return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere(radius: float) -> float:
    """Surface area of a sphere: 4 * pi * r**2; ValueError on negative input.

    Signature restored to the name the body and error message use.
    """
    if radius < 0:
        raise ValueError("""surface_area_sphere() only accepts non-negative values""" )
    return 4 * pi * radius**2
def surface_area_hemisphere(radius: float) -> float:
    """Total surface area of a hemisphere (curved + flat): 3 * pi * r**2.

    Signature restored to the name the body and error message use.
    """
    if radius < 0:
        raise ValueError("""surface_area_hemisphere() only accepts non-negative values""" )
    return 3 * pi * radius**2
def surface_area_cone(radius: float, height: float) -> float:
    """Total surface area of a cone: pi * r * (r + slant height).

    The obfuscated signature duplicated the parameter name (a SyntaxError);
    restored to `radius`/`height`, the names the body reads.
    """
    if radius < 0 or height < 0:
        raise ValueError("""surface_area_cone() only accepts non-negative values""" )
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum(radius_a: float, radius_b: float, height: float) -> float:
    """Total surface area of a conical frustum with end radii `radius_a`,
    `radius_b` and height `height`.

    The obfuscated version collapsed both radii into one name (duplicate
    parameters are a SyntaxError, and the formula compared/added a radius
    with itself); the two radii are distinct again.
    """
    if radius_a < 0 or radius_b < 0 or height < 0:
        raise ValueError(
            """surface_area_conical_frustum() only accepts non-negative values""" )
    slant_height = (height**2 + (radius_a - radius_b) ** 2) ** 0.5
    return pi * ((slant_height * (radius_a + radius_b)) + radius_a**2 + radius_b**2)
def surface_area_cylinder(radius: float, height: float) -> float:
    """Total surface area of a cylinder: 2 * pi * r * (h + r).

    Duplicate parameter names (a SyntaxError) restored to `radius`/`height`,
    the names the body reads.
    """
    if radius < 0 or height < 0:
        raise ValueError("""surface_area_cylinder() only accepts non-negative values""" )
    return 2 * pi * radius * (height + radius)
def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    """Surface area of a (ring) torus: 4 * pi**2 * R * r.

    Fixes: duplicate parameter names (a SyntaxError) restored to the names
    the body reads, and `pow(lowercase_, 2)` restored to `pow(pi, 2)` -
    the torus area formula the return statement clearly implements.
    """
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("""surface_area_torus() only accepts non-negative values""" )
    if torus_radius < tube_radius:
        raise ValueError(
            """surface_area_torus() does not support spindle or self intersecting tori""" )
    return 4 * pow(pi, 2 ) * torus_radius * tube_radius
def area_rectangle(length: float, width: float) -> float:
    """Area of a rectangle; ValueError on negative inputs.

    Duplicate parameter names (a SyntaxError) restored to `length`/`width`,
    the names the body reads.
    """
    if length < 0 or width < 0:
        raise ValueError("""area_rectangle() only accepts non-negative values""" )
    return length * width
def area_square(side_length: float) -> float:
    """Area of a square: a**2; ValueError on negative input.

    Signature restored to the name the body and error message use.
    """
    if side_length < 0:
        raise ValueError("""area_square() only accepts non-negative values""" )
    return side_length**2
def area_triangle(base: float, height: float) -> float:
    """Area of a triangle from base and height: b * h / 2.

    Duplicate parameter names (a SyntaxError) restored to `base`/`height`,
    the names the body reads.
    """
    if base < 0 or height < 0:
        raise ValueError("""area_triangle() only accepts non-negative values""" )
    return (base * height) / 2
def area_triangle_three_sides(sidea: float, sideb: float, sidec: float) -> float:
    """Area of a triangle from its three side lengths (Heron's formula).

    The obfuscated signature declared three identical parameter names, so
    the non-negativity test, the triangle inequality and Heron's formula all
    compared a side with itself; the three sides are distinct again.
    """
    if sidea < 0 or sideb < 0 or sidec < 0:
        raise ValueError("""area_triangle_three_sides() only accepts non-negative values""" )
    elif sidea + sideb < sidec or sidea + sidec < sideb or sideb + sidec < sidea:
        raise ValueError("""Given three sides do not form a triangle""" )
    semi_perimeter = (sidea + sideb + sidec) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - sidea)
        * (semi_perimeter - sideb)
        * (semi_perimeter - sidec) )
    return area
def area_parallelogram(base: float, height: float) -> float:
    """Area of a parallelogram: b * h; ValueError on negative inputs.

    Duplicate parameter names (a SyntaxError) restored to `base`/`height`.
    """
    if base < 0 or height < 0:
        raise ValueError("""area_parallelogram() only accepts non-negative values""" )
    return base * height
def area_trapezium(basea: float, baseb: float, height: float) -> float:
    """Area of a trapezium: (b1 + b2) * h / 2.

    The obfuscated version collapsed the two parallel bases into one name
    (duplicate parameters are a SyntaxError and the formula summed a base
    with itself); the two bases are distinct again.
    """
    if basea < 0 or baseb < 0 or height < 0:
        raise ValueError("""area_trapezium() only accepts non-negative values""" )
    return 1 / 2 * (basea + baseb) * height
def area_circle(radius: float) -> float:
    """Area of a circle: pi * r**2; ValueError on negative input.

    Signature restored to the name the body and error message use.
    """
    if radius < 0:
        raise ValueError("""area_circle() only accepts non-negative values""" )
    return pi * radius**2
def area_ellipse(radius_x: float, radius_y: float) -> float:
    """Area of an ellipse: pi * a * b.

    Duplicate parameter names (a SyntaxError) restored to
    `radius_x`/`radius_y`, the names the body reads.
    """
    if radius_x < 0 or radius_y < 0:
        raise ValueError("""area_ellipse() only accepts non-negative values""" )
    return pi * radius_x * radius_y
def area_rhombus(diagonal_a: float, diagonal_b: float) -> float:
    """Area of a rhombus from its diagonals: d1 * d2 / 2.

    The obfuscated version collapsed the two diagonals into one name
    (duplicate parameters are a SyntaxError and the product used the same
    diagonal twice); the diagonals are distinct again.
    """
    if diagonal_a < 0 or diagonal_b < 0:
        raise ValueError("""area_rhombus() only accepts non-negative values""" )
    return 1 / 2 * diagonal_a * diagonal_b
def area_reg_polygon(sides: int, length: float) -> float:
    """Area of a regular polygon with `sides` sides of length `length`:
    (n * a**2) / (4 * tan(pi / n)).

    Fixes over the obfuscated version: the isinstance check compared the
    argument with itself instead of `int`, the parameter names were
    duplicated (a SyntaxError), and a dead duplicate `return` after the
    real one was dropped.
    """
    if not isinstance(sides, int ) or sides < 3:
        raise ValueError(
            """area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides""" )
    elif length < 0:
        raise ValueError(
            """area_reg_polygon() only accepts non-negative values as \
length of a side""" )
    return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
    # Run the doctests, then print a demo table of every formula.
    # NOTE(review): the names called below (area_rectangle, surface_area_cube,
    # ...) match the error messages inside the function bodies above, but the
    # obfuscated defs are all named `UpperCamelCase` - restore the original
    # function names for this demo to run.
    import doctest
    doctest.testmod(verbose=True)  # verbose so we can see methods missing tests
    print('[DEMO] Areas of various geometric shapes: \n')
    print(f'''Rectangle: {area_rectangle(10, 20) = }''')
    print(f'''Square: {area_square(10) = }''')
    print(f'''Triangle: {area_triangle(10, 10) = }''')
    print(f'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
    print(f'''Parallelogram: {area_parallelogram(10, 20) = }''')
    print(f'''Rhombus: {area_rhombus(10, 20) = }''')
    print(f'''Trapezium: {area_trapezium(10, 20, 30) = }''')
    print(f'''Circle: {area_circle(20) = }''')
    print(f'''Ellipse: {area_ellipse(10, 20) = }''')
    print('\nSurface Areas of various geometric shapes: \n')
    print(f'''Cube: {surface_area_cube(20) = }''')
    print(f'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
    print(f'''Sphere: {surface_area_sphere(20) = }''')
    print(f'''Hemisphere: {surface_area_hemisphere(20) = }''')
    print(f'''Cone: {surface_area_cone(10, 20) = }''')
    print(f'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
    print(f'''Cylinder: {surface_area_cylinder(10, 20) = }''')
    print(f'''Torus: {surface_area_torus(20, 10) = }''')
    print(f'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
    print(f'''Square: {area_reg_polygon(4, 10) = }''')
    print(f'''Reqular Pentagon: {area_reg_polygon(5, 10) = }''')
| 713 |
from __future__ import annotations
from collections.abc import Callable
A_ : List[Any] = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the linear system matrix @ x = vector by Gaussian elimination
    with partial pivoting; returns x as a column, rounded to 10 dp.

    Fixes over the obfuscated version: duplicate `lowercase_` parameters (a
    SyntaxError) restored to `matrix`/`vector` (the names the body reads),
    the lost assignment targets into `augmented` reinstated, and the
    function renamed to `solve`, the name its call site below uses.
    """
    size: int = len(matrix)
    # Build the augmented matrix [matrix | vector].
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting: bring the largest |entry| in this column up to `row`.
        pivot_row = max(
            (abs(augmented[rowa][col]), rowa) for rowa in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        # Eliminate every entry below the pivot.
        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio

        row += 1
        col += 1

    # back substitution: clear entries above each pivot.
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_points: list[int]) -> Callable[[int], int]:
    """Fit the minimal-degree polynomial through (1, y_1), ..., (n, y_n) and
    return it as a callable on integers.

    Fixes over the obfuscated version: the assignment targets into the
    Vandermonde `matrix` and the `vector` column were lost, and the function
    is renamed to `interpolate`, the name its call site below uses. It still
    relies on the sibling `solve` defined above in this file.
    """
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            # Vandermonde row for the abscissa x = x_val + 1.
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs: Matrix = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        # Evaluate with integer-rounded coefficients so exact integer
        # sequences are reproduced exactly.
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable: int) -> int:
    """Project Euler 101 generating function:
    u(n) = 1 - n + n**2 - n**3 + ... + n**10.

    Fixes: the parameter was renamed from `lowercase_` to `variable` (the
    name the body reads), and the function to `question_function`, the name
    used as the default argument of the solver below.
    """
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Project Euler 101: sum the first incorrect terms of the optimum
    polynomials fitted to successively longer prefixes of `func`'s sequence.

    Fixes over the obfuscated version: duplicate `lowercase_` parameters (a
    SyntaxError) restored to `func`/`order`, the lost local bindings
    (`data_points`, `polynomials`, `ret`, `x_val`) reinstated, and the
    function renamed to `solution`, the name the __main__ block calls. It
    relies on the sibling `interpolate`/`question_function` defined above.
    """
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    for poly in polynomials:
        # Walk forward until the fitted polynomial first disagrees with func.
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
    # Print the Project Euler 101 answer when executed as a script.
    # NOTE(review): `solution` is not defined under that name above (the
    # obfuscated def is `UpperCamelCase`); confirm.
    print(f'''{solution() = }''')
| 64 | 0 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
# Module logger for the CSV packaged builder.
A_ : int = datasets.utils.logging.get_logger(__name__)
# pandas.read_csv parameters with no usable default; dropped from the kwargs
# dict when the user leaves them unset.
A_ : str = ['names', 'prefix']
# Parameters deprecated by pandas; only forwarded when explicitly changed.
A_ : List[Any] = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
# Parameters introduced in pandas 1.3 (stripped when running an older pandas).
A_ : List[Any] = ['encoding_errors', 'on_bad_lines']
# Parameters introduced in pandas 2.0 (stripped when running an older pandas).
A_ : Optional[int] = ['date_format']
# NOTE(review): all four lists are bound to the same name ``A_`` here, yet the
# CsvConfig property below reads ``_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS``
# etc. — obfuscation appears to have renamed the originals; verify upstream.
@dataclass
class _a (datasets.BuilderConfig ):
    """BuilderConfig for CSV datasets: mirrors the keyword arguments of
    ``pandas.read_csv`` so they can be forwarded verbatim to pandas.

    NOTE(review): obfuscation damage — every field below is named
    ``UpperCAmelCase__`` (so only the final annotation survives as a real
    dataclass field), and the two methods are both named ``__A`` (the second
    shadows the first). Compare with the upstream
    ``datasets/packaged_modules/csv/csv.py`` before trusting field names.
    """
    UpperCAmelCase__: str = ","
    UpperCAmelCase__: Optional[str] = None
    UpperCAmelCase__: Optional[Union[int, List[int], str]] = "infer"
    UpperCAmelCase__: Optional[List[str]] = None
    UpperCAmelCase__: Optional[List[str]] = None
    UpperCAmelCase__: Optional[Union[int, str, List[int], List[str]]] = None
    UpperCAmelCase__: Optional[Union[List[int], List[str]]] = None
    UpperCAmelCase__: Optional[str] = None
    UpperCAmelCase__: bool = True
    UpperCAmelCase__: Optional[Literal["c", "python", "pyarrow"]] = None
    UpperCAmelCase__: Dict[Union[int, str], Callable[[Any], Any]] = None
    UpperCAmelCase__: Optional[list] = None
    UpperCAmelCase__: Optional[list] = None
    UpperCAmelCase__: bool = False
    UpperCAmelCase__: Optional[Union[int, List[int]]] = None
    UpperCAmelCase__: Optional[int] = None
    UpperCAmelCase__: Optional[Union[str, List[str]]] = None
    UpperCAmelCase__: bool = True
    UpperCAmelCase__: bool = True
    UpperCAmelCase__: bool = False
    UpperCAmelCase__: bool = True
    UpperCAmelCase__: Optional[str] = None
    UpperCAmelCase__: str = "."
    UpperCAmelCase__: Optional[str] = None
    UpperCAmelCase__: str = '"'
    UpperCAmelCase__: int = 0
    UpperCAmelCase__: Optional[str] = None
    UpperCAmelCase__: Optional[str] = None
    UpperCAmelCase__: Optional[str] = None
    UpperCAmelCase__: Optional[str] = None
    UpperCAmelCase__: bool = True
    UpperCAmelCase__: bool = True
    UpperCAmelCase__: int = 0
    UpperCAmelCase__: bool = True
    UpperCAmelCase__: bool = False
    UpperCAmelCase__: Optional[str] = None
    UpperCAmelCase__: int = 1_00_00
    UpperCAmelCase__: Optional[datasets.Features] = None
    UpperCAmelCase__: Optional[str] = "strict"
    UpperCAmelCase__: Literal["error", "warn", "skip"] = "error"
    UpperCAmelCase__: Optional[str] = None
    # Post-init hook: lets ``delimiter``/``column_names`` act as aliases.
    # NOTE(review): assigns to a throwaway local ``A__`` — the original almost
    # certainly wrote ``self.sep = self.delimiter`` / ``self.names = self.column_names``.
    def __A ( self ):
        if self.delimiter is not None:
            A__ : Any = self.delimiter
        if self.column_names is not None:
            A__ : Optional[Any] = self.column_names
    # Build the kwargs dict forwarded to pandas.read_csv, removing entries
    # that are unsupported or deprecated on the installed pandas version.
    # NOTE(review): the dict is assigned to local ``A__`` but read back as
    # ``pd_read_csv_kwargs`` below — another obfuscation break.
    @property
    def __A ( self ):
        A__ : Tuple = {
            """sep""": self.sep,
            """header""": self.header,
            """names""": self.names,
            """index_col""": self.index_col,
            """usecols""": self.usecols,
            """prefix""": self.prefix,
            """mangle_dupe_cols""": self.mangle_dupe_cols,
            """engine""": self.engine,
            """converters""": self.converters,
            """true_values""": self.true_values,
            """false_values""": self.false_values,
            """skipinitialspace""": self.skipinitialspace,
            """skiprows""": self.skiprows,
            """nrows""": self.nrows,
            """na_values""": self.na_values,
            """keep_default_na""": self.keep_default_na,
            """na_filter""": self.na_filter,
            """verbose""": self.verbose,
            """skip_blank_lines""": self.skip_blank_lines,
            """thousands""": self.thousands,
            """decimal""": self.decimal,
            """lineterminator""": self.lineterminator,
            """quotechar""": self.quotechar,
            """quoting""": self.quoting,
            """escapechar""": self.escapechar,
            """comment""": self.comment,
            """encoding""": self.encoding,
            """dialect""": self.dialect,
            """error_bad_lines""": self.error_bad_lines,
            """warn_bad_lines""": self.warn_bad_lines,
            """skipfooter""": self.skipfooter,
            """doublequote""": self.doublequote,
            """memory_map""": self.memory_map,
            """float_precision""": self.float_precision,
            """chunksize""": self.chunksize,
            """encoding_errors""": self.encoding_errors,
            """on_bad_lines""": self.on_bad_lines,
            """date_format""": self.date_format,
        }
        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , A__ ):
                del pd_read_csv_kwargs[pd_read_csv_parameter]
        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]
        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]
        return pd_read_csv_kwargs
class _a (datasets.ArrowBasedBuilder ):
    """Arrow-based CSV dataset builder: reads CSV files via pandas in chunks
    and yields pyarrow Tables.

    NOTE(review): all four methods are named ``__A`` (later defs shadow
    earlier ones) and several locals are read under their pre-obfuscation
    names (``data_files``, ``files``, ``splits``, ``schema``, ``pa_table``);
    compare with upstream ``datasets`` before relying on this code.
    """
    UpperCAmelCase__: Tuple = CsvConfig
    # Dataset metadata: only the (optional) user-supplied features.
    def __A ( self ):
        return datasets.DatasetInfo(features=self.config.features )
    # Resolve data_files into one SplitGenerator per split.
    def __A ( self , A__ ):
        if not self.config.data_files:
            raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        A__ : Any = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(A__ , (str, list, tuple) ):
            A__ : Dict = data_files
            if isinstance(A__ , A__ ):
                A__ : Any = [files]
            A__ : Dict = [dl_manager.iter_files(A__ ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
        A__ : List[Any] = []
        for split_name, files in data_files.items():
            if isinstance(A__ , A__ ):
                A__ : Dict = [files]
            A__ : int = [dl_manager.iter_files(A__ ) for file in files]
            splits.append(datasets.SplitGenerator(name=A__ , gen_kwargs={"""files""": files} ) )
        return splits
    # Cast a pyarrow table to the configured features, using the cheap
    # column-reorder path when no storage cast is required.
    def __A ( self , A__ ):
        if self.config.features is not None:
            A__ : List[str] = self.config.features.arrow_schema
            if all(not require_storage_cast(A__ ) for feature in self.config.features.values() ):
                # cheaper cast
                A__ : Any = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=A__ )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                A__ : Optional[int] = table_cast(A__ , A__ )
        return pa_table
    # Stream (key, pa.Table) pairs: one chunk per pandas iterator batch.
    def __A ( self , A__ ):
        A__ : Any = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        A__ : Optional[int] = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(A__ ) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(A__ ) ):
            A__ : Union[str, Any] = pd.read_csv(A__ , iterator=A__ , dtype=A__ , **self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(A__ ):
                    A__ : Optional[int] = pa.Table.from_pandas(A__ )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(A__ )
            except ValueError as e:
                logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
                raise
| 714 |
from functools import lru_cache
@lru_cache
def UpperCamelCase (lowercase_: int ) -> int:
    """Return ``lowercase_!`` (factorial), memoised across calls.

    Raises:
        ValueError: if *lowercase_* is negative.
    """
    if lowercase_ < 0:
        raise ValueError("""Number should not be negative.""" )
    # Iterative product 2 * 3 * ... * n; empty range for 0 and 1 gives 1.
    result = 1
    for factor in range(2 , lowercase_ + 1 ):
        result *= factor
    return result
if __name__ == "__main__":
    # Self-test: run any doctests defined in this module when executed directly.
    import doctest
    doctest.testmod()
| 64 | 0 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
A_ : List[Any] = logging.getLogger(__name__)
class _a (__magic_name__ ):
    """Distributed RAG retriever: the main worker holds the index and serves
    retrieval results to the other workers via a dedicated gloo process group.

    NOTE(review): obfuscation damage — all methods are named ``__A`` (later
    defs shadow earlier ones), ``__init__`` takes four parameters all named
    ``A__`` (duplicate argument names are a SyntaxError), and several locals
    are read under pre-obfuscation names (``distributed_port``,
    ``target_tensor``, ``addrs``, ``ifname``, ``world_size``, ...).
    Compare with transformers' ``RagPyTorchDistributedRetriever``.
    """
    def __init__( self , A__ , A__ , A__ , A__=None ):
        super().__init__(
            A__ , question_encoder_tokenizer=A__ , generator_tokenizer=A__ , index=A__ , init_retrieval=A__ , )
        A__ : List[str] = None
    # Create the gloo retrieval group and load the index on the main worker.
    def __A ( self , A__ ):
        logger.info("""initializing retrieval""" )
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("""dist initialized""" )
            # needs to be set manually
            A__ : List[Any] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            A__ : Tuple = str(distributed_port + 1 )
            A__ : Dict = dist.new_group(ranks=A__ , backend="""gloo""" )
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("""dist not initialized / main""" )
            self.index.init_index()
        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group )
    # True only on rank 0 of the retrieval process group.
    def __A ( self ):
        return dist.get_rank(group=self.process_group ) == 0
    # Receive one scattered tensor shard from rank 0.
    def __A ( self , A__ , A__ , A__=torch.floataa ):
        A__ : Tuple = torch.empty(A__ , dtype=A__ )
        dist.scatter(A__ , src=0 , scatter_list=A__ , group=self.process_group )
        return target_tensor
    # Pick a network interface whose name starts with "e" (eth*/en*).
    def __A ( self ):
        A__ : Union[str, Any] = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        A__ : Optional[int] = next((addr for addr in addrs if addr.startswith("""e""" )) , A__ )
        return ifname
    # Gather query states on rank 0, retrieve there, and scatter results back.
    def __A ( self , A__ , A__ ):
        # single GPU training
        if not dist.is_initialized():
            A__ : str = self._main_retrieve(A__ , A__ )
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(A__ )
        # distributed training
        A__ : str = dist.get_world_size(group=self.process_group )
        # gather logic
        A__ : Dict = None
        if self._is_main():
            A__ : Any = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(A__ )]
        dist.gather(torch.tensor(A__ ) , dst=0 , gather_list=A__ , group=self.process_group )
        # scatter logic
        A__ : Union[str, Any] = question_hidden_states.shape[0]
        A__ : List[str] = []
        A__ : Union[str, Any] = []
        if self._is_main():
            assert len(A__ ) == world_size
            A__ : Any = self._main_retrieve(torch.cat(A__ ).numpy() , A__ )
            A__ : int = torch.tensor(A__ ), torch.tensor(A__ )
            A__ : int = self._chunk_tensor(A__ , A__ )
            A__ : Dict = self._chunk_tensor(A__ , A__ )
        A__ : Optional[int] = self._scattered(A__ , [n_queries, n_docs] , target_type=torch.intaa )
        A__ : Optional[int] = self._scattered(A__ , [n_queries, n_docs, question_hidden_states.shape[1]] )
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(A__ )
| 715 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class _a (datasets.BeamBasedBuilder ):
    """Minimal Beam-based builder over the flat dummy examples, used by the
    Beam builder tests below.

    NOTE(review): all three methods are named ``__A`` (later defs shadow the
    earlier ones) — upstream these are _info/_split_generators/_build_pcollection.
    """
    # Features: a single string column named "content".
    def __A ( self ):
        return datasets.DatasetInfo(
            features=datasets.Features({"""content""": datasets.Value("""string""" )} ) , supervised_keys=A__ , )
    # One TRAIN split fed from get_test_dummy_examples().
    def __A ( self , A__ , A__ ):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_dummy_examples()} )]
    # Build the Beam PCollection directly from the in-memory examples.
    def __A ( self , A__ , A__ ):
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(A__ )
class _a (datasets.BeamBasedBuilder ):
    """Beam-based builder over the nested dummy examples ({"a": {"b": [str]}}),
    used to exercise nested features in the Beam builder tests.

    NOTE(review): all three methods are named ``__A`` (later defs shadow the
    earlier ones) — upstream these are _info/_split_generators/_build_pcollection.
    """
    # Features: a Sequence of string values under key "a"/"b".
    def __A ( self ):
        return datasets.DatasetInfo(
            features=datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) , supervised_keys=A__ , )
    # One TRAIN split fed from get_test_nested_examples().
    def __A ( self , A__ , A__ ):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_nested_examples()} )
        ]
    # Build the Beam PCollection directly from the in-memory examples.
    def __A ( self , A__ , A__ ):
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(A__ )
def UpperCamelCase () -> Dict:
    """Return the flat dummy corpus as (index, {"content": value}) pairs."""
    examples = []
    for index, content in enumerate(("""foo""", """bar""", """foobar""") ):
        examples.append((index, {"content": content}) )
    return examples
def UpperCamelCase () -> Tuple:
    """Return the nested dummy corpus as (index, {"a": {"b": [value]}}) pairs."""
    examples = []
    for index, content in enumerate(("""foo""", """bar""", """foobar""") ):
        examples.append((index, {"a": {"b": [content]}}) )
    return examples
class _a (__magic_name__ ):
    """Tests for the Beam-based builders above: end-to-end download_and_prepare
    with the DirectRunner, sharded parquet writing, missing-runner errors, and
    nested features.

    NOTE(review): obfuscation damage — all test methods are named ``__A``
    (later defs shadow earlier ones) and several locals are read under their
    pre-obfuscation names (``builder``, ``dset``, ``expected_num_examples``,
    ``DummyBeamDataset``, ``NestedBeamDataset``); the builder classes above
    are actually named ``_a`` here. Compare with upstream test_beam.py.
    """
    # Happy path: prepare the flat dataset and verify rows, metadata and files.
    @require_beam
    def __A ( self ):
        A__ : Dict = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            A__ : int = DummyBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
            A__ : int = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows , A__ )
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
            self.assertDictEqual(dset["""train"""][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset["""train"""][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
            del dset
    # Force num_shards=2 via a patched WriteToParquet and verify sharded files.
    @require_beam
    def __A ( self ):
        import apache_beam as beam
        A__ : int = beam.io.parquetio.WriteToParquet
        A__ : List[str] = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            A__ : str = DummyBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
            with patch("""apache_beam.io.parquetio.WriteToParquet""" ) as write_parquet_mock:
                A__ : Optional[Any] = partial(A__ , num_shards=2 )
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
            A__ : Optional[int] = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows , A__ )
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["""train"""]["""content"""] ) , sorted(["""foo""", """bar""", """foobar"""] ) )
            self.assertTrue(
                os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
            del dset
    # Without a beam_runner the builder must refuse to prepare.
    @require_beam
    def __A ( self ):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            A__ : int = DummyBeamDataset(cache_dir=A__ )
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
    # Same happy path as the first test, but with the nested-features builder.
    @require_beam
    def __A ( self ):
        A__ : List[Any] = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            A__ : Optional[int] = NestedBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) )
            A__ : Optional[int] = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows , A__ )
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
            self.assertDictEqual(dset["""train"""][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset["""train"""][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
            del dset
| 64 | 0 |
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
A_ : List[str] = logging.get_logger('transformers.models.speecht5')
# NOTE(review): every constant below is bound to the same obfuscated name
# ``A_``, yet the functions further down read MAPPING_* / IGNORE_KEYS_* names —
# verify against the upstream convert_speecht5_original_pytorch_checkpoint script.
# fairseq -> HF key mapping: speech encoder prenet.
A_ : List[str] = {
    'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
    'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
    'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
    'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
# fairseq -> HF key mapping: text encoder prenet.
A_ : int = {
    'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
    'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
# fairseq -> HF key mapping: speech decoder prenet.
A_ : str = {
    'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
    'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
    'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
    'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
    'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
# fairseq -> HF key mapping: speech decoder postnet (5 conv+batchnorm layers).
A_ : Any = {
    'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
    'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
    'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
    'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
    'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
    'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
    'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
    'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
    'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
    'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
    'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
    'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
# fairseq -> HF key mapping: text decoder prenet.
A_ : Any = {
    'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
# fairseq -> HF key mapping: text decoder postnet (LM head).
A_ : int = {
    'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
# fairseq -> HF key mapping: shared transformer encoder (``*`` = layer index).
A_ : Any = {
    'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
    'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
    'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
    'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
    'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
    'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
    'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
    'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
    'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
# fairseq -> HF key mapping: shared transformer decoder (``*`` = layer index).
A_ : Optional[Any] = {
    'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
    'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
    'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
    'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
    'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
    'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
    'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
    'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
    'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
    'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
    'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
    'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
    'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
# Task-specific merged mappings: speech-to-text.
A_ : str = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_TEXT_DECODER_PRENET,
    **MAPPING_TEXT_DECODER_POSTNET,
}
# Task-specific merged mappings: text-to-speech.
A_ : str = {
    **MAPPING_TEXT_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
# Task-specific merged mappings: speech-to-speech.
A_ : Union[str, Any] = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
A_ : int = []
# Keys present in fairseq checkpoints that have no HF counterpart.
A_ : Optional[Any] = [
    'encoder.version',
    'encoder.layers.*.norm_k.weight',
    'encoder.layers.*.norm_k.bias',
    'decoder.version',
    'decoder.layers.*.norm_k.weight',
    'decoder.layers.*.norm_k.bias',
    'decoder.pos_emb.pe_k',
    'speech_encoder_prenet.embed_positions._float_tensor',
    'text_decoder_prenet.embed_positions._float_tensor',
]
# Additional ignore lists per task (s2t / t2s / s2s respectively).
A_ : Optional[Any] = IGNORE_KEYS + [
    'encoder.proj',
    'text_encoder_prenet.*',
    'speech_decoder_prenet.*',
    'speech_decoder_postnet.*',
]
A_ : Union[str, Any] = IGNORE_KEYS + [
    'encoder.proj',
    'speech_encoder_prenet.*',
    'text_decoder_prenet.*',
    'text_decoder_postnet.*',
]
A_ : Optional[int] = IGNORE_KEYS + [
    'encoder.proj',
    'text_encoder_prenet.*',
    'text_decoder_prenet.*',
    'text_decoder_postnet.*',
]
def UpperCamelCase (hf_pointer , key , value , full_name , weight_type ):
    """Copy one fairseq tensor into the matching attribute of the HF model.

    *hf_pointer* is walked down the dotted *key* to reach the target module;
    *weight_type* then selects which tensor (weight / bias / weight_g / ...)
    receives *value* (or the pointer itself when weight_type is None).

    Raises:
        ValueError: when the destination shape does not match ``value.shape``.
    """
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    # Validate shapes up front so a mismatch names the offending checkpoint key.
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""" )
    # Assign into .data so parameter identity (and optimizer state) is preserved.
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )
def UpperCamelCase (name , ignore_keys ):
    """Return True when checkpoint key *name* matches a pattern in *ignore_keys*.

    Pattern forms:
      * ``"prefix.*"``  — matches any name starting with ``"prefix."``;
      * ``"a.*.b"``     — matches names containing both ``"a"`` and ``"b"``;
      * anything else   — plain substring containment.
    """
    for key in ignore_keys:
        if key.endswith(""".*""" ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(""".*.""" )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
# Load all fairseq weights into the HF model for the given task.
# NOTE(review): obfuscation damage — the three parameters are all named
# ``lowercase_`` (duplicate argument names are a SyntaxError) and most locals
# are read under pre-obfuscation names (``task``, ``hf_model``,
# ``fairseq_dict``, ``unused_weights``, ``is_used``, ``MAPPING``, ...).
# Compare with the upstream recursively_load_weights in the SpeechT5
# conversion script before trusting this body.
def UpperCamelCase (lowercase_: Dict , lowercase_: List[str] , lowercase_: Optional[Any] ) -> Optional[int]:
    A__ : Any = []
    # Pick the key mapping + ignore list for the requested task.
    if task == "s2t":
        A__ : Any = hf_model.speechta.encoder.prenet.feature_encoder
        A__ : Optional[Any] = MAPPING_S2T
        A__ : List[Any] = IGNORE_KEYS_S2T
    elif task == "t2s":
        A__ : Any = None
        A__ : Optional[int] = MAPPING_T2S
        A__ : Optional[int] = IGNORE_KEYS_T2S
    elif task == "s2s":
        A__ : Tuple = hf_model.speechta.encoder.prenet.feature_encoder
        A__ : List[str] = MAPPING_S2S
        A__ : Dict = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"""Unsupported task: {task}""" )
    for name, value in fairseq_dict.items():
        if should_ignore(lowercase_ , lowercase_ ):
            logger.info(f"""{name} was ignored""" )
            continue
        A__ : Optional[Any] = False
        # Conv feature-extractor layers have their own loader.
        if "conv_layers" in name:
            load_conv_layer(
                lowercase_ , lowercase_ , lowercase_ , lowercase_ , hf_model.config.feat_extract_norm == """group""" , )
            A__ : Any = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    A__ : Tuple = key.split(""".*.""" )
                    if prefix in name and suffix in name:
                        A__ : Optional[Any] = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    A__ : Dict = True
                    if "*" in mapped_key:
                        A__ : int = name.split(lowercase_ )[0].split(""".""" )[-2]
                        A__ : List[str] = mapped_key.replace("""*""" , lowercase_ )
                    # Classify which tensor attribute this checkpoint entry feeds.
                    if "weight_g" in name:
                        A__ : Optional[Any] = """weight_g"""
                    elif "weight_v" in name:
                        A__ : List[Any] = """weight_v"""
                    elif "bias" in name:
                        A__ : int = """bias"""
                    elif "weight" in name:
                        A__ : Any = """weight"""
                    elif "running_mean" in name:
                        A__ : Optional[Any] = """running_mean"""
                    elif "running_var" in name:
                        A__ : Tuple = """running_var"""
                    elif "num_batches_tracked" in name:
                        A__ : Dict = """num_batches_tracked"""
                    else:
                        A__ : Union[str, Any] = None
                    set_recursively(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
                continue
        if not is_used:
            unused_weights.append(lowercase_ )
    logger.warning(f"""Unused weights: {unused_weights}""" )
# Load one conv feature-extractor weight/bias from the fairseq checkpoint.
# NOTE(review): obfuscation damage — all five parameters are named
# ``lowercase_`` (duplicate argument names are a SyntaxError) and locals are
# read under pre-obfuscation names (``full_name``, ``name``, ``items``,
# ``layer_id``, ``type_id``, ``value``, ``feature_extractor``,
# ``unused_weights``, ``use_group_norm``); the branch bodies also assign to
# throwaway ``A__`` locals instead of ``....data = value``.
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: Any , lowercase_: Optional[Any] , lowercase_: str , lowercase_: Any ) -> List[str]:
    A__ : int = full_name.split("""conv_layers.""" )[-1]
    A__ : Optional[int] = name.split(""".""" )
    A__ : Any = int(items[0] )
    A__ : Dict = int(items[1] )
    # type_id 0 -> the conv weight/bias of the layer.
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            A__ : Optional[int] = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            A__ : List[str] = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    # type_id 2 -> the layer norm (only layer 0 when using group norm).
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            A__ : int = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            A__ : str = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(lowercase_ )
# End-to-end conversion: build the HF SpeechT5 model/config for the task,
# load the fairseq checkpoint into it, save (and optionally push) the result.
# NOTE(review): obfuscation damage — all six parameters are named
# ``lowercase_`` (duplicate argument names are a SyntaxError) and locals are
# read under pre-obfuscation names (``config_path``, ``task``, ``config``,
# ``model``, ``vocab_path``, ``tokenizer``, ``processor``, ``repo_id``, ...).
@torch.no_grad()
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: int , lowercase_: str , lowercase_: List[Any]=None , lowercase_: Tuple=None , lowercase_: Dict=None , ) -> Union[str, Any]:
    if config_path is not None:
        A__ : Tuple = SpeechTaConfig.from_pretrained(lowercase_ )
    else:
        A__ : Tuple = SpeechTaConfig()
    # Select the model class and position limits per task.
    if task == "s2t":
        A__ : Optional[int] = config.max_text_positions
        A__ : Any = SpeechTaForSpeechToText(lowercase_ )
    elif task == "t2s":
        A__ : List[Any] = 1876
        A__ : str = 600
        A__ : List[Any] = config.max_speech_positions
        A__ : Tuple = SpeechTaForTextToSpeech(lowercase_ )
    elif task == "s2s":
        A__ : Dict = 1876
        A__ : int = config.max_speech_positions
        A__ : Any = SpeechTaForSpeechToSpeech(lowercase_ )
    else:
        raise ValueError(f"""Unknown task name: {task}""" )
    # Build and save the tokenizer/processor when a vocab is provided.
    if vocab_path:
        A__ : Any = SpeechTaTokenizer(lowercase_ , model_max_length=config.max_text_positions )
        # Mask token behaves like a normal word, i.e. include the space before it
        A__ : Optional[Any] = AddedToken("""<mask>""" , lstrip=lowercase_ , rstrip=lowercase_ )
        A__ : List[Any] = mask_token
        tokenizer.add_special_tokens({"""mask_token""": mask_token} )
        tokenizer.add_tokens(["""<ctc_blank>"""] )
        A__ : Dict = SpeechTaFeatureExtractor()
        A__ : List[Any] = SpeechTaProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ )
        processor.save_pretrained(lowercase_ )
    A__ : List[Any] = torch.load(lowercase_ )
    recursively_load_weights(fairseq_checkpoint["""model"""] , lowercase_ , lowercase_ )
    model.save_pretrained(lowercase_ )
    if repo_id:
        print("""Pushing to the hub...""" )
        processor.push_to_hub(lowercase_ )
        model.push_to_hub(lowercase_ )
if __name__ == "__main__":
    # CLI entry point for the SpeechT5 checkpoint conversion.
    # NOTE(review): the parser/args are assigned to ``A_`` but used as
    # ``parser``/``args`` — obfuscation renamed the originals.
    A_ : Union[str, Any] = argparse.ArgumentParser()
    parser.add_argument(
        '--task',
        default='s2t',
        type=str,
        help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
    )
    parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
    )
    A_ : List[str] = parser.parse_args()
    convert_speechta_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
| 716 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
A_ : Union[str, Any] = logging.get_logger(__name__)
class _a (__magic_name__ ):
    """Deprecated alias of ``PoolFormerImageProcessor``.

    Kept only for backward compatibility; emits a FutureWarning on
    construction and otherwise delegates everything to the parent.
    """

    def __init__( self , *args , **kwargs ):
        # Warn once per construction that this class is scheduled for removal
        # in Transformers v5.
        warnings.warn(
            """The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use PoolFormerImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 64 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy import structure for the BARTpho tokenizer: only advertised when
# sentencepiece is installed.
# NOTE(review): the structure dict and module object are assigned to ``A_``
# but read back as ``_import_structure`` / module attributes — obfuscation
# renamed the originals (upstream uses ``_import_structure``).
A_ : Optional[Any] = {}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A_ : str = ['BartphoTokenizer']
if TYPE_CHECKING:
    # Static type checkers see the real import; runtime users get the lazy module.
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys
    # Replace this module with a lazy proxy that imports on attribute access.
    A_ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 717 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
A_ : Any = logging.getLogger(__name__)
def UpperCamelCase (a=2 , b=3 , batch_size=16 , n_train_batches: int = 10 , n_valid_batches: int = 2 ):
    """Build train/valid DataLoaders over synthetic ``y = a*x + b + noise`` data.

    Returns:
        (train_dataloader, valid_dataloader) yielding *n_train_batches* and
        *n_valid_batches* batches of size *batch_size* respectively.
    """
    def get_dataset(n_batches ):
        # x ~ N(0, 1); targets follow the linear model plus small Gaussian noise.
        x = torch.randn(batch_size * n_batches , 1 )
        return TensorDataset(x , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
    train_dataset = get_dataset(n_train_batches )
    valid_dataset = get_dataset(n_valid_batches )
    # Shuffle only the training stream so validation order is reproducible.
    train_dataloader = DataLoader(train_dataset , shuffle=True , batch_size=batch_size , num_workers=4 )
    valid_dataloader = DataLoader(valid_dataset , shuffle=False , batch_size=batch_size , num_workers=4 )
    return (train_dataloader, valid_dataloader)
def UpperCamelCase (num_epochs: int , model , dataloader , optimizer , accelerator , scheduler=None ):
    """Run a short training loop (MSE regression) and return the random draws.

    Args:
        num_epochs: number of passes over ``dataloader``.
        model / dataloader / optimizer: standard torch training objects.
        accelerator: object providing ``backward(loss)`` (accelerate Accelerator).
        scheduler: optional LR scheduler, stepped once per batch.

    Returns:
        List of ``random.random()`` values, one per optimization step (used by
        the checkpointing tests to verify RNG-state restoration).
    """
    # Fix: the original declared all six parameters under two duplicated names
    # (a SyntaxError) and the body referenced undefined names.
    rands = []
    for epoch in range(num_epochs ):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x )
            loss = torch.nn.functional.mse_loss(outputs , y )
            accelerator.backward(loss )
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random() )  # Introduce some randomness
            if scheduler is not None:
                scheduler.step()
    return rands
class _a (nn.Module ):
'''simple docstring'''
def __init__( self ):
super().__init__()
A__ : str = nn.Parameter(torch.randn(1 ) )
A__ : Any = nn.Parameter(torch.randn(1 ) )
def __A ( self , A__ ):
return x * self.a + self.b
class _a (unittest.TestCase ):
    """Integration tests for Accelerator checkpointing: saving/restoring model,
    optimizer, scheduler and RNG state, plus checkpoint rotation.

    NOTE(review): the obfuscation collapsed every local to ``A__`` and several
    lines use an annotated tuple target (``((A__) , (A__)) : str = ...``),
    which is not valid Python syntax; the equality assertions therefore compare
    a name with itself — confirm against the upstream accelerate
    ``test_state_checkpointing`` suite before relying on these tests.
    """

    def __A ( self ):
        # total_limit=1 with automatic naming: a second save must evict the
        # first, leaving exactly one checkpoint directory.
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : Optional[Any] = DummyModel()
            A__ : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ , A__ : str = dummy_dataloaders()
            A__ : Dict = ProjectConfiguration(total_limit=1 , project_dir=A__ , automatic_checkpoint_naming=A__ )
            # Train baseline
            A__ : List[str] = Accelerator(project_config=A__ )
            A__ , A__ , A__ , A__ : Any = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )

    def __A ( self ):
        # Round-trip with explicit directories: save, train, load, and check
        # that model/optimizer state and the RNG stream are fully restored.
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : str = DummyModel()
            A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ , A__ : int = dummy_dataloaders()
            # Train baseline
            A__ : str = Accelerator()
            A__ , A__ , A__ , A__ : List[str] = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            # Save initial
            A__ : List[Any] = os.path.join(A__ , """initial""" )
            accelerator.save_state(A__ )
            ((A__) , (A__)) : str = model.a.item(), model.b.item()
            A__ : Dict = optimizer.state_dict()
            A__ : List[str] = train(3 , A__ , A__ , A__ , A__ )
            ((A__) , (A__)) : str = model.a.item(), model.b.item()
            A__ : Any = optimizer.state_dict()
            # Train partially
            set_seed(42 )
            A__ : Optional[int] = DummyModel()
            A__ : Dict = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ , A__ : Dict = dummy_dataloaders()
            A__ : List[str] = Accelerator()
            A__ , A__ , A__ , A__ : Optional[Any] = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            accelerator.load_state(A__ )
            ((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
            A__ : Union[str, Any] = optimizer.state_dict()
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            A__ : List[str] = train(2 , A__ , A__ , A__ , A__ )
            # Save everything
            A__ : Optional[int] = os.path.join(A__ , """checkpoint""" )
            accelerator.save_state(A__ )
            # Load everything back in and make sure all states work
            accelerator.load_state(A__ )
            test_rands += train(1 , A__ , A__ , A__ , A__ )
            ((A__) , (A__)) : Union[str, Any] = model.a.item(), model.b.item()
            A__ : Optional[int] = optimizer.state_dict()
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )

    def __A ( self ):
        # Same round-trip as above but via automatic checkpoint naming
        # (checkpoints/checkpoint_0, checkpoint_1, ...).
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : int = DummyModel()
            A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ , A__ : List[str] = dummy_dataloaders()
            A__ : str = ProjectConfiguration(automatic_checkpoint_naming=A__ )
            # Train baseline
            A__ : Any = Accelerator(project_dir=A__ , project_config=A__ )
            A__ , A__ , A__ , A__ : str = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            # Save initial
            accelerator.save_state()
            ((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
            A__ : int = optimizer.state_dict()
            A__ : int = train(3 , A__ , A__ , A__ , A__ )
            ((A__) , (A__)) : Optional[Any] = model.a.item(), model.b.item()
            A__ : Any = optimizer.state_dict()
            # Train partially
            set_seed(42 )
            A__ : Dict = DummyModel()
            A__ : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ , A__ : Union[str, Any] = dummy_dataloaders()
            A__ : List[Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A__ )
            A__ : Dict = Accelerator(project_dir=A__ , project_config=A__ )
            A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
            ((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
            A__ : Tuple = optimizer.state_dict()
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            A__ : str = train(2 , A__ , A__ , A__ , A__ )
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_1""" ) )
            test_rands += train(1 , A__ , A__ , A__ , A__ )
            ((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
            A__ : List[Any] = optimizer.state_dict()
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )

    def __A ( self ):
        # register_for_checkpointing must reject objects without
        # state_dict/load_state_dict (the raw tensors at index 0 and 1),
        # while accepting the model (2) and optimizer (3).
        A__ : Union[str, Any] = torch.tensor([1, 2, 3] )
        A__ : int = torch.tensor([2, 3, 4] )
        A__ : List[Any] = DummyModel()
        A__ : List[Any] = torch.optim.Adam(net.parameters() )
        A__ : Tuple = Accelerator()
        with self.assertRaises(A__ ) as ve:
            accelerator.register_for_checkpointing(A__ , A__ , A__ , A__ )
        A__ : Any = str(ve.exception )
        self.assertTrue("""Item at index 0""" in message )
        self.assertTrue("""Item at index 1""" in message )
        self.assertFalse("""Item at index 2""" in message )
        self.assertFalse("""Item at index 3""" in message )

    def __A ( self ):
        # LR-scheduler state must change during training and be restored by
        # load_state.
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : Any = DummyModel()
            A__ : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ : Dict = torch.optim.lr_scheduler.StepLR(A__ , step_size=1 , gamma=0.9_9 )
            A__ , A__ : List[Any] = dummy_dataloaders()
            A__ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=A__ )
            # Train baseline
            A__ : Optional[Any] = Accelerator(project_dir=A__ , project_config=A__ )
            A__ , A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
                A__ , A__ , A__ , A__ , A__ )
            # Save initial
            accelerator.save_state()
            A__ : Tuple = scheduler.state_dict()
            train(3 , A__ , A__ , A__ , A__ , A__ )
            self.assertNotEqual(A__ , scheduler.state_dict() )
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
            self.assertEqual(A__ , scheduler.state_dict() )

    def __A ( self ):
        # With total_limit=2, after 11 saves only the two most recent
        # checkpoints (9 and 10) remain on disk.
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : Optional[Any] = DummyModel()
            A__ : int = ProjectConfiguration(automatic_checkpoint_naming=A__ , total_limit=2 )
            # Train baseline
            A__ : List[str] = Accelerator(project_dir=A__ , project_config=A__ )
            A__ : Union[str, Any] = accelerator.prepare(A__ )
            # Save 3 states:
            for _ in range(11 ):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) ) )
            self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_9""" ) ) )
            self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_10""" ) ) )

    @require_cuda
    def __A ( self ):
        # Launch this very file under torchrun (one process per GPU) to run
        # the distributed checks in the __main__ block below.
        A__ : Dict = ["""torchrun""", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
        execute_subprocess_async(A__ , env=os.environ.copy() )
if __name__ == "__main__":
    # Distributed entry point, launched via torchrun by the @require_cuda test
    # above: exercises save_state/load_state with the optimizer
    # ``map_location`` options ("cpu", "on_device", and an invalid value).
    # NOTE(review): ``A_ , A_ : Dict = accelerator.prepare(model, optimizer)``
    # below is an annotated tuple target, which is not valid Python syntax —
    # confirm against the upstream accelerate test script.
    A_ : List[str] = '/tmp/accelerate/state_checkpointing'
    A_ : Optional[Any] = DummyModel()
    A_ : Union[str, Any] = torch.optim.Adam(params=model.parameters(), lr=1E-3)
    A_ : str = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    A_ , A_ : List[Any] = dummy_dataloaders()
    A_ : int = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    A_ : List[str] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
    if accelerator.process_index == 0:
        # Only the main process prepares a clean checkpoint directory.
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    A_ , A_ , A_ , A_ , A_ : List[Any] = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    A_ , A_ : Dict = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the intial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        A_ : str = group['params'][0].device
        break
    assert param_device.type == accelerator.device.type
    A_ : Optional[Any] = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()
    # Check CPU state
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
    for group in optimizer.param_groups:
        A_ : str = group['params'][0].device
        break
    assert (
        param_device.type == torch.device('cpu').type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
    for group in optimizer.param_groups:
        A_ : Tuple = group['params'][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
    # Check error
    with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
        accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
| 64 | 0 |
import os
import pytest
from attr import dataclass
A_ : Union[str, Any] = 'us-east-1' # defaults region
@dataclass
class _a :
'''simple docstring'''
UpperCAmelCase__: str
UpperCAmelCase__: Any = '''arn:aws:iam::558105141721:role/sagemaker_execution_role'''
UpperCAmelCase__: Tuple = {
'''task_name''': '''mnli''',
'''per_device_train_batch_size''': 16,
'''per_device_eval_batch_size''': 16,
'''do_train''': True,
'''do_eval''': True,
'''do_predict''': True,
'''output_dir''': '''/opt/ml/model''',
'''overwrite_output_dir''': True,
'''max_steps''': 5_00,
'''save_steps''': 55_00,
}
UpperCAmelCase__: str = {**hyperparameters, '''max_steps''': 10_00}
@property
def __A ( self ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def __A ( self ):
return F"""{self.framework}-transfromers-test"""
@property
def __A ( self ):
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
def __A ( self ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def UpperCamelCase (request ):
    """Class-scoped fixture: attach a SageMaker test environment to the test class.

    NOTE(review): the original bound the environment to a throwaway local and
    referenced the undefined names ``request`` and ``SageMakerTestEnvironment``;
    tests are expected to read the environment from ``request.cls.env`` —
    confirm against the upstream sagemaker conftest.
    """
    # ``_a`` is the SageMakerTestEnvironment dataclass defined above.
    request.cls.env = _a(framework=request.cls.framework )
| 718 |
def UpperCamelCase (a: str , b: str ) -> bool:
    """Return True if abbreviation ``b`` can be produced from string ``a``.

    ``b`` is derived from ``a`` by upper-casing some lowercase letters and
    deleting the remaining lowercase letters (dynamic programming, O(n*m)).

    >>> UpperCamelCase("daBcd", "ABC")
    True
    >>> UpperCamelCase("dBcd", "ABC")
    False
    """
    # Fix: the original declared both parameters under the same name (a
    # SyntaxError) and assigned the DP results to throwaway locals instead of
    # writing dp[i + 1][...].
    n = len(a )
    m = len(b )
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
    for i in range(n ):
        for j in range(m + 1 ):
            if dp[i][j]:
                # Consume a[i] by upper-casing it to match b[j].
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # Lowercase characters may also simply be dropped.
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 64 | 0 |
def UpperCamelCase (s: str , pattern: str ) -> list:
    """Return every index at which ``pattern`` occurs in ``s`` (naive O(n*m) scan).

    >>> UpperCamelCase('ABCDEFG', 'DE')
    [3]
    """
    # Fix: the original declared both parameters under the same name (a
    # SyntaxError) and the body referenced undefined names (s, pattern,
    # pat_len, match_found, position).
    pat_len = len(pattern )
    position = []
    for i in range(len(s ) - pat_len + 1 ):
        match_found = True
        for j in range(pat_len ):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i )
    return position


if __name__ == "__main__":
    # Fix: the demo referenced ``naive_pattern_search``, which does not exist
    # in this file; the function above is the search being demonstrated.
    assert UpperCamelCase('ABCDEFG', 'DE') == [3]
    print(UpperCamelCase('ABAAABCDBBABCDDEBCABC', 'ABC'))
| 719 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
# Module-level RNG shared by the input-generation helper below
# (obfuscated name; upstream calls it ``global_rng``).
A_ : Dict = random.Random()

if is_torch_available():
    import torch
def UpperCamelCase (shape , scale: float = 1.0 , rng=None , name=None ) -> list:
    """Create a ``shape[0] x shape[1]`` nested list of random floats in [0, scale).

    Args:
        shape: (num_rows, num_cols) of the values to generate.
        scale: exclusive upper bound of the generated values.
        rng: optional ``random.Random``; falls back to the module-level RNG.
        name: unused; kept for signature compatibility with similar helpers.
    """
    # Fix: the original declared every parameter under the same name (a
    # SyntaxError) and the body referenced undefined names (rng, global_rng,
    # shape); its ``Tuple`` annotation was also undefined at definition time.
    if rng is None:
        rng = A_  # module-level global RNG defined above
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class _a (unittest.TestCase ):
'''simple docstring'''
def __init__( self , A__ , A__=7 , A__=400 , A__=2000 , A__=1 , A__=0.0 , A__=1_6000 , A__=True , A__=True , ):
A__ : Any = parent
A__ : Optional[int] = batch_size
A__ : Union[str, Any] = min_seq_length
A__ : Dict = max_seq_length
A__ : Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
A__ : str = feature_size
A__ : Optional[int] = padding_value
A__ : List[str] = sampling_rate
A__ : List[str] = return_attention_mask
A__ : int = do_normalize
def __A ( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __A ( self , A__=False , A__=False ):
def _flatten(A__ ):
return list(itertools.chain(*A__ ) )
if equal_length:
A__ : Dict = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
A__ : Union[str, Any] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
A__ : Optional[int] = [np.asarray(A__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _a (__magic_name__ , unittest.TestCase ):
    """Tests for the ASTFeatureExtractor (batching, dtype handling, and an
    integration check against reference values).

    NOTE(review): the base class ``__magic_name__`` is an obfuscation
    placeholder (upstream: SequenceFeatureExtractionTestMixin), and locals are
    collapsed to ``A__`` so several names referenced below
    (``ASTFeatureExtractionTester``, ``feat_extract`` …) are undefined in this
    file as written — confirm against the upstream test module.
    """

    UpperCAmelCase__: int = ASTFeatureExtractor  # class under test, used by the mixin

    def __A ( self ):
        # Build the hyper-parameter helper used by the mixin's tests.
        A__ : Optional[Any] = ASTFeatureExtractionTester(self )

    def __A ( self ):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        A__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        A__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        A__ : Optional[Any] = [np.asarray(A__ ) for speech_input in speech_inputs]
        # Test not batched input
        A__ : Tuple = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
        A__ : Tuple = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
        self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
        # Test batched
        A__ : Tuple = feat_extract(A__ , padding=A__ , return_tensors="""np""" ).input_values
        A__ : Tuple = feat_extract(A__ , padding=A__ , return_tensors="""np""" ).input_values
        for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
            self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        A__ : int = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        A__ : List[str] = np.asarray(A__ )
        A__ : Union[str, Any] = feat_extract(A__ , return_tensors="""np""" ).input_values
        A__ : Optional[Any] = feat_extract(A__ , return_tensors="""np""" ).input_values
        for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
            self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )

    @require_torch
    def __A ( self ):
        # Padding must preserve float32 for both numpy and torch tensors.
        import torch

        A__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        A__ : Tuple = np.random.rand(100 ).astype(np.floataa )
        A__ : Tuple = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            A__ : List[str] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            A__ : Any = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )

    def __A ( self , A__ ):
        # Load `num_samples` decoded audio arrays from the dummy LibriSpeech set.
        from datasets import load_dataset

        A__ : str = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
        # automatic decoding with librispeech
        A__ : str = ds.sort("""id""" ).select(range(A__ ) )[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]

    @require_torch
    def __A ( self ):
        # Integration test: compare the first 30 extracted values against
        # precomputed reference numbers.
        # fmt: off
        A__ : Optional[Any] = torch.tensor(
            [-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
             -1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
             -1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
             -0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] )
        # fmt: on
        A__ : Any = self._load_datasamples(1 )
        A__ : Tuple = ASTFeatureExtractor()
        A__ : Dict = feature_extractor(A__ , return_tensors="""pt""" ).input_values
        self.assertEquals(input_values.shape , (1, 1024, 128) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , A__ , atol=1e-4 ) )
| 64 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
A_ : Union[str, Any] = random.Random()  # module-level RNG shared by the helper below


def UpperCamelCase (shape , scale: float = 1.0 , rng=None , name=None ):
    """Create a ``shape[0] x shape[1]`` nested list of random floats in [0, scale).

    Args:
        shape: (num_rows, num_cols) of the values to generate.
        scale: exclusive upper bound of the generated values.
        rng: optional ``random.Random``; falls back to the module-level RNG.
        name: unused; kept for signature compatibility with similar helpers.
    """
    # Fix: the original declared every parameter under the same name (a
    # SyntaxError) and the body referenced undefined names (rng, global_rng,
    # shape).
    if rng is None:
        rng = A_
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class _a (unittest.TestCase ):
'''simple docstring'''
def __init__( self , A__ , A__=7 , A__=400 , A__=2000 , A__=10 , A__=160 , A__=8 , A__=0.0 , A__=4000 , A__=False , A__=True , ):
A__ : Union[str, Any] = parent
A__ : Dict = batch_size
A__ : int = min_seq_length
A__ : Optional[Any] = max_seq_length
A__ : Union[str, Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
A__ : List[str] = padding_value
A__ : List[str] = sampling_rate
A__ : Optional[int] = return_attention_mask
A__ : Any = do_normalize
A__ : Dict = feature_size
A__ : Optional[Any] = chunk_length
A__ : Any = hop_length
def __A ( self ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __A ( self , A__=False , A__=False ):
def _flatten(A__ ):
return list(itertools.chain(*A__ ) )
if equal_length:
A__ : Dict = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
A__ : Optional[Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
A__ : Tuple = [np.asarray(A__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _a (__magic_name__ , unittest.TestCase ):
    """Tests for the WhisperFeatureExtractor (serialization round-trips,
    batching/truncation, dtype handling, integration and normalization checks).

    NOTE(review): the base class ``__magic_name__`` is an obfuscation
    placeholder (upstream: SequenceFeatureExtractionTestMixin), and locals are
    collapsed to ``A__`` so several names referenced below
    (``WhisperFeatureExtractionTester``, ``feat_extract_first`` …) are
    undefined in this file as written — confirm against the upstream test
    module.
    """

    UpperCAmelCase__: List[str] = WhisperFeatureExtractor if is_speech_available() else None  # class under test

    def __A ( self ):
        # Build the hyper-parameter helper used by the mixin's tests.
        A__ : Dict = WhisperFeatureExtractionTester(self )

    def __A ( self ):
        # save_pretrained / from_pretrained round-trip must preserve the
        # config dict and the mel filterbank.
        A__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            A__ : Union[str, Any] = feat_extract_first.save_pretrained(A__ )[0]
            check_json_file_has_correct_format(A__ )
            A__ : Union[str, Any] = self.feature_extraction_class.from_pretrained(A__ )
        A__ : str = feat_extract_first.to_dict()
        A__ : Union[str, Any] = feat_extract_second.to_dict()
        A__ : Optional[int] = feat_extract_first.mel_filters
        A__ : List[Any] = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(A__ , A__ ) )
        self.assertEqual(A__ , A__ )

    def __A ( self ):
        # to_json_file / from_json_file round-trip must preserve the same state.
        A__ : int = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            A__ : Union[str, Any] = os.path.join(A__ , """feat_extract.json""" )
            feat_extract_first.to_json_file(A__ )
            A__ : str = self.feature_extraction_class.from_json_file(A__ )
        A__ : Dict = feat_extract_first.to_dict()
        A__ : Tuple = feat_extract_second.to_dict()
        A__ : Union[str, Any] = feat_extract_first.mel_filters
        A__ : int = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(A__ , A__ ) )
        self.assertEqual(A__ , A__ )

    def __A ( self ):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        A__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        A__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        A__ : int = [np.asarray(A__ ) for speech_input in speech_inputs]
        # Test feature size
        A__ : Union[str, Any] = feature_extractor(A__ , padding="""max_length""" , return_tensors="""np""" ).input_features
        self.assertTrue(input_features.ndim == 3 )
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
        # Test not batched input
        A__ : Optional[Any] = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
        A__ : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
        self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
        # Test batched
        A__ : Any = feature_extractor(A__ , return_tensors="""np""" ).input_features
        A__ : Optional[Any] = feature_extractor(A__ , return_tensors="""np""" ).input_features
        for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
            self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        A__ : List[str] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        A__ : List[Any] = np.asarray(A__ )
        A__ : List[str] = feature_extractor(A__ , return_tensors="""np""" ).input_features
        A__ : str = feature_extractor(A__ , return_tensors="""np""" ).input_features
        for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
            self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
        # Test truncation required
        A__ : List[Any] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
        A__ : Tuple = [np.asarray(A__ ) for speech_input in speech_inputs]
        A__ : List[str] = [x[: feature_extractor.n_samples] for x in speech_inputs]
        A__ : List[str] = [np.asarray(A__ ) for speech_input in speech_inputs_truncated]
        A__ : Any = feature_extractor(A__ , return_tensors="""np""" ).input_features
        A__ : str = feature_extractor(A__ , return_tensors="""np""" ).input_features
        for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
            self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )

    def __A ( self ):
        # Padding must preserve float32 for both numpy and torch tensors.
        import torch

        A__ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        A__ : Optional[int] = np.random.rand(100 , 32 ).astype(np.floataa )
        A__ : List[Any] = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            A__ : str = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
            self.assertTrue(np_processed.input_features.dtype == np.floataa )
            A__ : Optional[Any] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
            self.assertTrue(pt_processed.input_features.dtype == torch.floataa )

    def __A ( self , A__ ):
        # Load `num_samples` decoded audio arrays from the dummy LibriSpeech set.
        A__ : Tuple = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
        # automatic decoding with librispeech
        A__ : Optional[int] = ds.sort("""id""" ).select(range(A__ ) )[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]

    def __A ( self ):
        # Integration test: compare the first 30 extracted values against
        # precomputed reference numbers.
        # fmt: off
        A__ : Optional[int] = torch.tensor(
            [
                0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1,
                0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8,
                0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4,
                -0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4
            ] )
        # fmt: on
        A__ : Any = self._load_datasamples(1 )
        A__ : Optional[Any] = WhisperFeatureExtractor()
        A__ : Union[str, Any] = feature_extractor(A__ , return_tensors="""pt""" ).input_features
        self.assertEqual(input_features.shape , (1, 80, 3000) )
        self.assertTrue(torch.allclose(input_features[0, 0, :30] , A__ , atol=1e-4 ) )

    def __A ( self ):
        # Zero-mean / unit-variance normalization must hold even for inputs
        # rescaled to a large dynamic range.
        A__ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        A__ : Optional[Any] = self._load_datasamples(1 )[0]
        A__ : int = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535  # Rescale to [0, 65535] to show issue
        A__ : Optional[Any] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=A__ )[0]
        self.assertTrue(np.all(np.mean(A__ ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(A__ ) - 1 ) < 1e-3 ) )
| 720 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _a (__magic_name__ , __magic_name__ , __magic_name__ ):
    """GPT-2 based text decoder conditioned on a (CLIP-style) prefix embedding,
    with greedy-free beam-search generation.

    NOTE(review): the base classes are obfuscation placeholders (the imports
    above suggest ModelMixin, ConfigMixin and ModuleUtilsMixin), locals are
    collapsed to ``A__``, and several lines below use an annotated tuple
    target (``A__ , A__ : ... = ...``), which is not valid Python syntax —
    confirm against the upstream diffusers UniDiffuser text decoder.
    """

    # Parameter-name patterns ignored on load (attention bias buffers).
    UpperCAmelCase__: str = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']

    @register_to_config
    def __init__( self , A__ , A__ , A__ = None , A__ = 5_0257 , A__ = 1024 , A__ = 768 , A__ = 12 , A__ = 12 , A__ = None , A__ = "gelu_new" , A__ = 0.1 , A__ = 0.1 , A__ = 0.1 , A__ = 1e-5 , A__ = 0.0_2 , A__ = True , A__ = True , A__ = False , A__ = False , ):
        super().__init__()
        A__ : Union[str, Any] = prefix_length
        # A hidden projection is mandatory whenever the prefix dimensionality
        # differs from the transformer embedding size.
        # NOTE(review): the message interpolates ``prefix_hidden_dim`` (which is
        # None on this path) where ``prefix_inner_dim`` seems intended — confirm.
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                F"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
                F""" `n_embd`: {n_embd} are not equal.""" )
        A__ : str = prefix_inner_dim
        A__ : Optional[Any] = prefix_hidden_dim
        # Optional projections into and out of the hidden prefix space.
        A__ : Tuple = (
            nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        A__ : int = (
            nn.Linear(self.prefix_hidden_dim , A__ ) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        A__ : Tuple = GPTaConfig(
            vocab_size=A__ , n_positions=A__ , n_embd=A__ , n_layer=A__ , n_head=A__ , n_inner=A__ , activation_function=A__ , resid_pdrop=A__ , embd_pdrop=A__ , attn_pdrop=A__ , layer_norm_epsilon=A__ , initializer_range=A__ , scale_attn_weights=A__ , use_cache=A__ , scale_attn_by_inverse_layer_idx=A__ , reorder_and_upcast_attn=A__ , )
        A__ : int = GPTaLMHeadModel(A__ )

    def __A ( self , A__ , A__ , A__ = None , A__ = None , ):
        # Forward pass: encode/decode the prefix, prepend it to the token
        # embeddings, and run the LM head (with dummy-token labels when training).
        A__ : List[str] = self.transformer.transformer.wte(A__ )
        A__ : int = self.encode_prefix(A__ )
        A__ : int = self.decode_prefix(A__ )
        A__ : Optional[Any] = torch.cat((prefix_embeds, embedding_text) , dim=1 )
        if labels is not None:
            A__ : Any = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
            A__ : List[Any] = torch.cat((dummy_token, input_ids) , dim=1 )
        A__ : List[str] = self.transformer(inputs_embeds=A__ , labels=A__ , attention_mask=A__ )
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def __A ( self , A__ , A__ ):
        # Zero tokens standing in for the prefix positions in the labels.
        return torch.zeros(A__ , self.prefix_length , dtype=torch.intaa , device=A__ )

    def __A ( self , A__ ):
        # Project a prefix into the hidden prefix space.
        return self.encode_prefix(A__ )

    @torch.no_grad()
    def __A ( self , A__ , A__ , A__ ):
        # Generate captions one prefix at a time via beam search; returns the
        # top beam's tokens and sequence length for each input feature.
        A__ : List[Any] = torch.split(A__ , 1 , dim=0 )
        A__ : Optional[int] = []
        A__ : str = []
        for feature in features:
            A__ : Dict = self.decode_prefix(feature.to(A__ ) )  # back to the clip feature
            # Only support beam search for now
            A__ , A__ : Union[str, Any] = self.generate_beam(
                input_embeds=A__ , device=A__ , eos_token_id=A__ )
            generated_tokens.append(output_tokens[0] )
            generated_seq_lengths.append(seq_lengths[0] )
        A__ : int = torch.stack(A__ )
        A__ : List[Any] = torch.stack(A__ )
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def __A ( self , A__=None , A__=None , A__=None , A__ = 5 , A__ = 67 , A__ = 1.0 , A__ = None , ):
        # Beam search over the LM: maintains `beam_size` candidate sequences,
        # their cumulative log-probabilities, lengths and stop flags.
        A__ : Any = eos_token_id
        A__ : Any = None
        A__ : Optional[int] = None
        A__ : Optional[Any] = torch.ones(A__ , device=A__ , dtype=torch.int )
        A__ : Any = torch.zeros(A__ , device=A__ , dtype=torch.bool )
        if input_embeds is not None:
            A__ : Dict = input_embeds
        else:
            A__ : str = self.transformer.transformer.wte(A__ )
        for i in range(A__ ):
            A__ : Dict = self.transformer(inputs_embeds=A__ )
            A__ : str = outputs.logits
            # Temperature-scaled log-probabilities of the next token.
            A__ : Union[str, Any] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            A__ : Any = logits.softmax(-1 ).log()
            if scores is None:
                # First step: seed the beams with the top-k next tokens.
                A__ , A__ : Optional[int] = logits.topk(A__ , -1 )
                A__ : List[Any] = generated.expand(A__ , *generated.shape[1:] )
                A__ , A__ : List[Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
                if tokens is None:
                    A__ : Optional[Any] = next_tokens
                else:
                    A__ : List[Any] = tokens.expand(A__ , *tokens.shape[1:] )
                    A__ : int = torch.cat((tokens, next_tokens) , dim=1 )
            else:
                # Subsequent steps: mask stopped beams, then pick the best
                # length-normalized cumulative scores across all expansions.
                A__ : Optional[int] = -float(np.inf )
                A__ : List[Any] = 0
                A__ : str = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                A__ : Dict = scores_sum / seq_lengths[:, None]
                A__ , A__ : List[Any] = scores_sum_average.view(-1 ).topk(A__ , -1 )
                # Recover which beam each winning expansion came from.
                A__ : Tuple = next_tokens // scores_sum.shape[1]
                A__ : Optional[Any] = seq_lengths[next_tokens_source]
                A__ : List[str] = next_tokens % scores_sum.shape[1]
                A__ : Optional[int] = next_tokens.unsqueeze(1 )
                A__ : int = tokens[next_tokens_source]
                A__ : List[Any] = torch.cat((tokens, next_tokens) , dim=1 )
                A__ : str = generated[next_tokens_source]
                A__ : Optional[Any] = scores_sum_average * seq_lengths
                A__ : Union[str, Any] = is_stopped[next_tokens_source]
            # Append the chosen token's embedding and update the stop flags.
            A__ : str = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
            A__ : Optional[int] = torch.cat((generated, next_token_embed) , dim=1 )
            A__ : List[str] = is_stopped + next_tokens.eq(A__ ).squeeze()
            if is_stopped.all():
                break
        # Order the beams by length-normalized score, best first.
        A__ : Dict = scores / seq_lengths
        A__ : Dict = scores.argsort(descending=A__ )
        # tokens tensors are already padded to max_seq_length
        A__ : Union[str, Any] = [tokens[i] for i in order]
        A__ : Any = torch.stack(A__ , dim=0 )
        A__ : Dict = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
        return output_texts, seq_lengths
| 64 | 0 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class _a :
'''simple docstring'''
UpperCAmelCase__: float
UpperCAmelCase__: TreeNode | None = None
UpperCAmelCase__: TreeNode | None = None
def UpperCamelCase (lowercase_: TreeNode | None ) -> bool:
    """Return True iff the tree rooted at *lowercase_* is a valid binary search tree.

    First validates that every node is a ``TreeNode`` whose ``data`` is
    float-convertible, then checks the strict BST ordering invariant.

    Raises:
        ValueError: if any node fails validation.

    NOTE(review): requires the node dataclass to be available under the name
    ``TreeNode``; the current revision of this module names it ``_a``. The
    previous body called ``isinstance(node, node)`` (a TypeError at runtime)
    and gave the inner helpers duplicate/unbound parameter names.
    """

    def is_valid_tree(node: TreeNode | None) -> bool:
        # Every node must be a TreeNode carrying numeric data.
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(lowercase_):
        raise ValueError(
            """Each node should be type of TreeNode and data should be float.""" )

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        # Each value must lie strictly inside the (left_bound, right_bound)
        # window inherited from its ancestors.
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(lowercase_, -float("""inf""" ), float("""inf""" ))
if __name__ == "__main__":
    import doctest

    # Run the module's doctests when this file is executed directly.
    doctest.testmod()
| 721 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
A_ : Tuple = datasets.utils.logging.get_logger(__name__)  # module-level logger (referenced below as `logger` — name mangled by rewrite)
@dataclass
class _a (datasets.BuilderConfig ):
    """BuilderConfig for the JSON loader.

    The attribute names below are what the builder reads via
    ``self.config.<name>`` (``features``, ``encoding``, ``chunksize``, ...);
    the previous revision declared every field under one duplicated name, so
    all but the last were lost.
    """

    features: Optional[datasets.Features] = None  # optional explicit schema; inferred when None
    encoding: str = "utf-8"  # text encoding used to read the files
    encoding_errors: Optional[str] = None  # decode errors= policy; "strict" when None
    field: Optional[str] = None  # read only this top-level key of a single JSON document
    use_threads: bool = True  # deprecated, ignored
    block_size: Optional[int] = None  # deprecated, use `chunksize`
    chunksize: int = 10 << 20  # 10MB parse granularity for JSON-lines files
    newlines_in_values: Optional[bool] = None  # no longer supported
class _a (datasets.ArrowBasedBuilder ):
    '''Arrow-based builder for JSON and JSON-lines datasets.

    Reads either (a) one JSON document per file — optionally narrowed to a
    single top-level key via ``config.field`` — or (b) newline-delimited JSON,
    parsed in chunks with ``pyarrow.json`` and retried with a doubled block
    size whenever a record straddles a chunk boundary.
    '''

    # NOTE(review): a mechanical rewrite collapsed distinct locals to `A__`
    # throughout this class while later expressions still reference the
    # original names (`data_files`, `files`, `splits`, `keys`, `dataset`,
    # `batch`, `block_size`, `batch_idx`, ...). Those bindings must be
    # restored before this class can run. This attribute is presumably
    # `BUILDER_CONFIG_CLASS` — TODO confirm.
    UpperCAmelCase__: List[str] = JsonConfig

    def __A ( self ):
        # Validate deprecated/unsupported config options and build DatasetInfo.
        if self.config.block_size is not None:
            logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
            A__ : Union[str, Any] = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                """The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" )
        if self.config.newlines_in_values is not None:
            raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
        return datasets.DatasetInfo(features=self.config.features )

    def __A ( self , A__ ):
        # Resolve config.data_files into per-split generators of file iterators.
        if not self.config.data_files:
            raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        A__ : int = dl_manager.download_and_extract(self.config.data_files )
        # Single split: a bare path / list of paths maps to the TRAIN split.
        if isinstance(A__ , (str, list, tuple) ):
            A__ : Optional[Any] = data_files
            if isinstance(A__ , A__ ):
                A__ : List[str] = [files]
            A__ : int = [dl_manager.iter_files(A__ ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
        A__ : List[str] = []
        for split_name, files in data_files.items():
            if isinstance(A__ , A__ ):
                A__ : Optional[int] = [files]
            A__ : Optional[int] = [dl_manager.iter_files(A__ ) for file in files]
            splits.append(datasets.SplitGenerator(name=A__ , gen_kwargs={"""files""": files} ) )
        return splits

    def __A ( self , A__ ):
        # Cast a pyarrow table to the configured schema, adding missing columns
        # as all-null arrays first.
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                A__ : Optional[Any] = self.config.features.arrow_schema.field(A__ ).type
                A__ : str = pa_table.append_column(A__ , pa.array([None] * len(A__ ) , type=A__ ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            A__ : Optional[int] = table_cast(A__ , self.config.features.arrow_schema )
        return pa_table

    def __A ( self , A__ ):
        # Yield (key, pyarrow.Table) pairs for every input file.
        for file_idx, file in enumerate(itertools.chain.from_iterable(A__ ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(A__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                    A__ : Optional[Any] = json.load(A__ )
                # We keep only the field we are interested in
                A__ : Optional[int] = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(A__ , (list, tuple) ):
                    A__ : Union[str, Any] = set().union(*[row.keys() for row in dataset] )
                    A__ : Any = {col: [row.get(A__ ) for row in dataset] for col in keys}
                else:
                    A__ : Any = dataset
                A__ : Any = pa.Table.from_pydict(A__ )
                yield file_idx, self._cast_table(A__ )
            # If the file has one json object per line
            else:
                with open(A__ , """rb""" ) as f:
                    A__ : List[str] = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    A__ : List[str] = max(self.config.chunksize // 32 , 16 << 10 )
                    A__ : Any = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
                    )
                    while True:
                        A__ : Dict = f.read(self.config.chunksize )
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(A__ )
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            A__ : List[Any] = batch.decode(self.config.encoding , errors=A__ ).encode("""utf-8""" )
                        try:
                            # Retry parsing with a doubled block size until pyarrow
                            # stops complaining about records straddling the buffer.
                            while True:
                                try:
                                    A__ : str = paj.read_json(
                                        io.BytesIO(A__ ) , read_options=paj.ReadOptions(block_size=A__ ) )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(A__ , pa.ArrowInvalid )
                                        and "straddling" not in str(A__ )
                                        or block_size > len(A__ )
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            F"""Batch of {len(A__ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            # Not valid JSON-lines: fall back to parsing the whole
                            # file as a single JSON list of objects.
                            try:
                                with open(
                                    A__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                                    A__ : Optional[Any] = json.load(A__ )
                            except json.JSONDecodeError:
                                logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(A__ , A__ ): # list is the only sequence type supported in JSON
                                try:
                                    A__ : str = set().union(*[row.keys() for row in dataset] )
                                    A__ : List[str] = {col: [row.get(A__ ) for row in dataset] for col in keys}
                                    A__ : int = pa.Table.from_pydict(A__ )
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
                                    raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
                                yield file_idx, self._cast_table(A__ )
                                break
                            else:
                                logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
                                raise ValueError(
                                    F"""Not able to read records in the JSON file at {file}. """
                                    F"""You should probably indicate the field of the JSON file containing your records. """
                                    F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
                                    F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(A__ )
                        batch_idx += 1
| 64 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class _a (unittest.TestCase ):
    '''Unit tests for CLIPSegProcessor (CLIP tokenizer + ViT image processor).'''

    # NOTE(review): every method below was renamed to `__A` by a mechanical
    # rewrite; unittest will not discover them (and duplicate names mean only
    # the last definition survives). The original setUp/tearDown/test_* names
    # must be restored for this suite to run. Locals were likewise renamed to
    # `A__` while assertions still reference the original names.
    def __A ( self ):
        # setUp: write a tiny BPE vocab/merges plus an image-processor config
        # into a fresh temp dir that the factory helpers below load from.
        A__ : str = tempfile.mkdtemp()
        # fmt: off
        A__ : Tuple = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
        # fmt: on
        A__ : Optional[int] = dict(zip(A__ , range(len(A__ ) ) ) )
        A__ : Union[str, Any] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
        A__ : Tuple = {"""unk_token""": """<unk>"""}
        A__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        A__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(A__ ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(A__ ) )
        A__ : Dict = {
            """do_resize""": True,
            """size""": 20,
            """do_center_crop""": True,
            """crop_size""": 18,
            """do_normalize""": True,
            """image_mean""": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
            """image_std""": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
        }
        A__ : Tuple = os.path.join(self.tmpdirname , A__ )
        with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
            json.dump(A__ , A__ )

    def __A ( self , **A__ ):
        # Factory: slow CLIP tokenizer loaded from the temp dir.
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **A__ )

    def __A ( self , **A__ ):
        # Factory: fast (Rust) CLIP tokenizer loaded from the temp dir.
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **A__ )

    def __A ( self , **A__ ):
        # Factory: ViT image processor loaded from the temp dir.
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **A__ )

    def __A ( self ):
        # tearDown: remove the temp dir created in setUp.
        shutil.rmtree(self.tmpdirname )

    def __A ( self ):
        # Helper: build one random uint8 PIL image (channels-first -> PIL).
        # NOTE(review): `np.uinta` looks like a garbled `np.uint8` — confirm.
        A__ : Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        A__ : int = [Image.fromarray(np.moveaxis(A__ , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def __A ( self ):
        # save_pretrained / from_pretrained round-trip with slow and fast tokenizers.
        A__ : int = self.get_tokenizer()
        A__ : int = self.get_rust_tokenizer()
        A__ : Any = self.get_image_processor()
        A__ : List[Any] = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ )
        processor_slow.save_pretrained(self.tmpdirname )
        A__ : int = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=A__ )
        A__ : int = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ )
        processor_fast.save_pretrained(self.tmpdirname )
        A__ : Optional[int] = CLIPSegProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , A__ )
        self.assertIsInstance(processor_fast.tokenizer , A__ )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , A__ )
        self.assertIsInstance(processor_fast.image_processor , A__ )

    def __A ( self ):
        # from_pretrained honours overridden special tokens / processor kwargs.
        A__ : Union[str, Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        A__ : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        A__ : Optional[Any] = self.get_image_processor(do_normalize=A__ , padding_value=1.0 )
        A__ : Dict = CLIPSegProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=A__ , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , A__ )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , A__ )

    def __A ( self ):
        # processor(images=...) must match the bare image processor output.
        A__ : Dict = self.get_image_processor()
        A__ : Optional[Any] = self.get_tokenizer()
        A__ : int = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ )
        A__ : Optional[int] = self.prepare_image_inputs()
        A__ : str = image_processor(A__ , return_tensors="""np""" )
        A__ : Optional[int] = processor(images=A__ , return_tensors="""np""" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def __A ( self ):
        # processor(text=...) must match the bare tokenizer output.
        A__ : Union[str, Any] = self.get_image_processor()
        A__ : List[Any] = self.get_tokenizer()
        A__ : Optional[int] = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ )
        A__ : Dict = """lower newer"""
        A__ : Dict = processor(text=A__ )
        A__ : Union[str, Any] = tokenizer(A__ )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def __A ( self ):
        # text+images yields input_ids/attention_mask/pixel_values; empty call raises.
        A__ : List[str] = self.get_image_processor()
        A__ : Optional[int] = self.get_tokenizer()
        A__ : Union[str, Any] = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ )
        A__ : List[Any] = """lower newer"""
        A__ : Optional[int] = self.prepare_image_inputs()
        A__ : Dict = processor(text=A__ , images=A__ )
        self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
        # test if it raises when no input is passed
        with pytest.raises(A__ ):
            processor()

    def __A ( self ):
        # images + visual_prompt yields pixel_values/conditional_pixel_values.
        A__ : int = self.get_image_processor()
        A__ : Any = self.get_tokenizer()
        A__ : Optional[Any] = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ )
        A__ : List[Any] = self.prepare_image_inputs()
        A__ : int = self.prepare_image_inputs()
        A__ : Union[str, Any] = processor(images=A__ , visual_prompt=A__ )
        self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """conditional_pixel_values"""] )
        # test if it raises when no input is passed
        with pytest.raises(A__ ):
            processor()

    def __A ( self ):
        # batch_decode delegates straight to the tokenizer.
        A__ : Optional[Any] = self.get_image_processor()
        A__ : str = self.get_tokenizer()
        A__ : Tuple = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ )
        A__ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        A__ : List[Any] = processor.batch_decode(A__ )
        A__ : int = tokenizer.batch_decode(A__ )
        self.assertListEqual(A__ , A__ )
| 700 |
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

# Validate repository file names: no uppercase letters, spaces, or hyphens,
# and every file must live inside a directory. Exits with the number of bad
# files so CI fails when any check trips.
# NOTE(review): restored the module-level names — the previous revision bound
# every list to `A_` while the conditions referenced the original names
# (`filepaths`, `upper_files`, ...), raising NameError at import time.
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 64 | 0 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def UpperCamelCase (lowercase_: Dict , lowercase_: Optional[Any] , lowercase_: Any , lowercase_: int ) -> Optional[int]:
    # Run a candidate program in a killable subprocess and report pass/fail.
    # NOTE(review): a mechanical rename gave all four parameters the same name
    # (`lowercase_` — a SyntaxError) while the body still references the
    # originals (`check_program`, `timeout`, `task_id`, `completion_id`), and
    # the worker callable passed as `target=` was clobbered (upstream this is
    # `unsafe_execute`). Unique names must be restored before this can run.
    A__ : int = multiprocessing.Manager()
    A__ : Optional[Any] = manager.list()
    A__ : List[str] = multiprocessing.Process(target=lowercase_ , args=(check_program, result, timeout) )
    p.start()
    # Give the worker one extra second beyond its own internal time limit.
    p.join(timeout=timeout + 1 )
    if p.is_alive():
        p.kill()
    # An empty result list means the worker never reported back.
    if not result:
        result.append("""timed out""" )
    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def UpperCamelCase (lowercase_: Any , lowercase_: Dict , lowercase_: Dict ) -> Union[str, Any]:
    # Execute untrusted code inside a temp dir with I/O swallowed, a time
    # limit, and destructive interpreter facilities disabled.
    # NOTE(review): locals were renamed to `A__` while later lines reference
    # the originals (`rmtree`, `rmdir`, `chdir`), and the helper names used
    # here (`create_tempdir`, `reliability_guard`, `swallow_io`, `time_limit`,
    # `TimeoutException`) no longer exist under those names in this mangled
    # module. Restore before use.
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        A__ : Optional[int] = shutil.rmtree
        A__ : List[str] = os.rmdir
        A__ : List[Any] = os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            A__ : Optional[Any] = {}
            with swallow_io():
                with time_limit(lowercase_ ):
                    exec(lowercase_ , lowercase_ )
            result.append("""passed""" )
        except TimeoutException:
            result.append("""timed out""" )
        except BaseException as e:
            result.append(f"""failed: {e}""" )
        # Needed for cleaning up.
        A__ : Dict = rmtree
        A__ : Any = rmdir
        A__ : Tuple = chdir
@contextlib.contextmanager
def UpperCamelCase (lowercase_: float ):
    """Context manager that raises ``TimeoutException`` after *lowercase_* seconds.

    Implemented with ``SIGALRM``/``setitimer``, so it only works on Unix and
    in the main thread. The timer is always cancelled on exit.

    NOTE(review): the previous revision passed the timeout value (instead of
    the handler) to ``signal.signal`` and gave the handler duplicate parameter
    names (a SyntaxError).
    """

    def signal_handler(signum, frame):
        # Fired by the interval timer; aborts the guarded block.
        raise TimeoutException("""Timed out!""" )

    signal.setitimer(signal.ITIMER_REAL, lowercase_)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        # Cancel the timer so it cannot fire after the block has exited.
        signal.setitimer(signal.ITIMER_REAL, 0)
@contextlib.contextmanager
def UpperCamelCase () -> List[Any]:
    # Swallow stdout/stderr into a write-only buffer and block stdin reads
    # while the guarded block runs.
    # NOTE(review): `WriteOnlyStringIO` / `redirect_stdin` are defined in this
    # module but were renamed to `_a`, and the redirect targets reference an
    # undefined `lowercase_` (should be the buffer created on the first line).
    A__ : int = WriteOnlyStringIO()
    with contextlib.redirect_stdout(lowercase_ ):
        with contextlib.redirect_stderr(lowercase_ ):
            with redirect_stdin(lowercase_ ):
                yield
@contextlib.contextmanager
def UpperCamelCase () -> List[Any]:
    # Create a temporary directory, cd into it for the duration of the block,
    # and yield its path; the directory is removed on exit.
    # NOTE(review): `chdir(lowercase_)` references an undefined name — upstream
    # this is `chdir(dirname)`, using the chdir context manager defined below.
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(lowercase_ ):
            yield dirname
class _a (__magic_name__ ):
    '''Exception raised by the time-limit context manager when the alarm fires.

    NOTE(review): the base-class name was clobbered to `__magic_name__` by a
    mechanical rename; upstream this is `class TimeoutException(Exception)`.
    '''

    pass
class _a (io.StringIO ):
    """A ``StringIO`` that raises on every read operation.

    Used to swallow program output while making any accidental read from the
    captured stream fail loudly.

    NOTE(review): the previous revision renamed all four methods to ``__A``,
    so only the last definition survived and none of the real ``io`` read API
    was overridden; the names below restore the intended overrides.
    """

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        # Declare the stream non-readable so io machinery refuses reads too.
        return False
class _a (contextlib._RedirectStream ):  # type: ignore
    """Context manager that redirects ``sys.stdin`` (counterpart of
    ``contextlib.redirect_stdout`` / ``redirect_stderr``)."""

    # _RedirectStream swaps the sys attribute named by the `_stream` class
    # attribute; the previous revision renamed it, breaking the redirection.
    _stream = """stdin"""
@contextlib.contextmanager
def UpperCamelCase (root ):
    """Temporarily change the working directory to *root*.

    A *root* of ``"."`` is a no-op. The previous working directory is always
    restored on exit, even if the guarded block raises.

    NOTE(review): the previous revision referenced an undefined ``root`` and
    chdir'd back to the *target* directory in ``finally``, leaving the caller
    stranded there instead of restoring the saved cwd.
    """
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        # Restore the directory we started from.
        os.chdir(cwd)
def UpperCamelCase (lowercase_: List[Any]=None ) -> Tuple:
    # Harden the interpreter before executing untrusted code: cap memory via
    # rlimits, disable faulthandler, and (upstream) null out destructive
    # builtins/os/shutil/subprocess/sys facilities.
    # NOTE(review): this is OpenAI human-eval's `reliability_guard`. Upstream it
    # assigns None to names such as `builtins.exit`, `builtins.quit`,
    # `os.kill`, `os.system`, `os.remove`, `shutil.rmtree`, `subprocess.Popen`,
    # `sys.modules[...]`, etc. A mechanical rename turned every such assignment
    # into a throwaway local `A__ = None`, so this guard currently disables
    # NOTHING — a security-relevant regression that must be restored before
    # this sandbox is trusted. The parameter was also renamed while the body
    # still reads `maximum_memory_bytes`.
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
        resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
    faulthandler.disable()
    import builtins

    A__ : Dict = None
    A__ : List[Any] = None
    import os

    A__ : List[str] = """1"""
    A__ : Optional[Any] = None
    A__ : List[Any] = None
    A__ : Tuple = None
    A__ : List[Any] = None
    A__ : int = None
    A__ : List[str] = None
    A__ : Union[str, Any] = None
    A__ : Any = None
    A__ : Any = None
    A__ : Optional[int] = None
    A__ : Union[str, Any] = None
    A__ : Tuple = None
    A__ : Tuple = None
    A__ : Any = None
    A__ : Dict = None
    A__ : Union[str, Any] = None
    A__ : Union[str, Any] = None
    A__ : str = None
    A__ : Any = None
    A__ : List[str] = None
    A__ : Tuple = None
    A__ : List[Any] = None
    A__ : Optional[Any] = None
    A__ : Union[str, Any] = None
    A__ : Union[str, Any] = None
    A__ : Any = None
    A__ : int = None
    import shutil

    A__ : List[Any] = None
    A__ : List[str] = None
    A__ : Union[str, Any] = None
    import subprocess

    A__ : Optional[Any] = None # type: ignore
    A__ : List[str] = None
    import sys

    A__ : List[Any] = None
    A__ : Any = None
    A__ : int = None
    A__ : List[str] = None
    A__ : List[Any] = None
| 701 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def UpperCamelCase (*args , take_from: Optional[Union[Dict, Any]] = None , standard_warn=True , stacklevel=2 ):
    """Emit deprecation warnings and collect deprecated kwargs/attributes.

    Each positional argument is an ``(attribute, version_name, message)``
    tuple (a single tuple may also be passed unwrapped). For every tuple:

    * raise ``ValueError`` if the installed package version already reached
      ``version_name`` (the deprecation should have been removed by now);
    * pop ``attribute`` from ``take_from`` when it is a dict of kwargs, or
      read it when ``take_from`` is an object exposing that attribute;
    * emit a ``FutureWarning``.

    Returns the collected values: nothing, a single value, or a tuple.

    Raises:
        TypeError: if ``take_from`` is a dict that still contains keys after
            all deprecated attributes were consumed (unexpected kwargs).

    NOTE(review): restored from the mangled revision, which gave every
    parameter the same name (a SyntaxError), dropped ``FutureWarning`` from
    the ``warnings.warn`` call, and hard-coded "(unknown)" where the caller's
    filename belongs in the TypeError message (the ``filename`` local was
    assigned but unused).
    """
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
                f""" version {__version__} is >= {version_name}""" )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
        elif deprecated_kwargs is None:
            warning = f"""`{attribute}` is deprecated and will be removed in version {version_name}."""

        if warning is not None:
            warning = warning + """ """ if standard_warn else """"""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" )

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
| 64 | 0 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
A_ : Union[str, Any] = logging.get_logger(__name__)  # module-level logger
class _a (__magic_name__ ):
    '''Image processor that shrinks images down to the nearest multiple of
    ``size_divisor`` and optionally rescales pixel values to [0, 1].
    '''

    # NOTE(review): the base-class name was clobbered to `__magic_name__`
    # (presumably BaseImageProcessor) and locals were renamed to `A__` while
    # later lines still reference the originals (`height`, `width`, `images`,
    # `do_resize`, ...); restore before use.
    UpperCAmelCase__: Union[str, Any] = ['''pixel_values''']

    def __init__( self , A__ = True , A__ = 32 , A__=PILImageResampling.BILINEAR , A__ = True , **A__ , ):
        # Store do_resize / size_divisor / resample / do_rescale defaults.
        A__ : int = do_resize
        A__ : Tuple = do_rescale
        A__ : Tuple = size_divisor
        A__ : List[Any] = resample
        super().__init__(**A__ )

    def __A ( self , A__ , A__ , A__ , A__ = None , **A__ ):
        # Resize down to the nearest multiple of size_divisor per dimension.
        A__ : str = get_image_size(A__ )
        # Rounds the height and width down to the closest multiple of size_divisor
        A__ : List[str] = height // size_divisor * size_divisor
        A__ : List[Any] = width // size_divisor * size_divisor
        A__ : List[Any] = resize(A__ , (new_h, new_w) , resample=A__ , data_format=A__ , **A__ )
        return image

    def __A ( self , A__ , A__ , A__ = None , **A__ ):
        # Thin wrapper over image_transforms.rescale.
        return rescale(image=A__ , scale=A__ , data_format=A__ , **A__ )

    def __A ( self , A__ , A__ = None , A__ = None , A__=None , A__ = None , A__ = None , A__ = ChannelDimension.FIRST , **A__ , ):
        # Preprocess entry point: to-numpy -> resize -> rescale -> channel format.
        A__ : Any = do_resize if do_resize is not None else self.do_resize
        A__ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
        A__ : int = size_divisor if size_divisor is not None else self.size_divisor
        A__ : Optional[Any] = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError("""size_divisor is required for resizing""" )
        A__ : str = make_list_of_images(A__ )
        if not valid_images(A__ ):
            raise ValueError("""Invalid image(s)""" )
        # All transformations expect numpy arrays.
        A__ : int = [to_numpy_array(A__ ) for img in images]
        if do_resize:
            A__ : Optional[Any] = [self.resize(A__ , size_divisor=A__ , resample=A__ ) for image in images]
        if do_rescale:
            A__ : List[Any] = [self.rescale(A__ , scale=1 / 255 ) for image in images]
        A__ : Union[str, Any] = [to_channel_dimension_format(A__ , A__ ) for image in images]
        A__ : int = {"""pixel_values""": images}
        return BatchFeature(data=A__ , tensor_type=A__ )
| 702 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def UpperCamelCase (flax_key_tuple , flax_tensor ):
    """Rename one flax checkpoint key/tensor pair to its PyTorch layout.

    ``kernel`` entries become ``weight`` and are transposed (3D expert kernels
    have their last two dims swapped, 2D linear kernels are transposed);
    ``scale``/``embedding`` entries are renamed without reshaping.

    Returns the (possibly updated) ``(key_tuple, tensor)`` pair.

    NOTE(review): the previous revision gave both parameters the same name
    (a SyntaxError) while the body referenced the original names.
    """
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
    return flax_key_tuple, flax_tensor
def UpperCamelCase (layer , checkpoint_info , switch_checkpoint_path ):
    """Split a flattened t5x checkpoint key into name, sub-key path and content.

    *layer* is a "/"-joined key from the flattened checkpoint dict. Keys
    containing ``metadata`` or ``kvstore`` are split at that marker; anything
    else is split at the last "/" segment. ``kvstore/path`` entries are
    resolved relative to *switch_checkpoint_path* and ``kvstore/driver``
    entries are forced to the literal ``"file"``.

    Returns:
        ``(curr_real_layer_name, split_layer, content)``

    NOTE(review): the previous revision gave all three parameters the same
    name (a SyntaxError) while the body referenced the original names.
    """
    if "metadata" in layer:
        split_layer = layer.split("""metadata""" )
        curr_real_layer_name = """""".join(split_layer[0] )[:-1]
        split_layer = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )]
    elif "kvstore" in layer:
        split_layer = layer.split("""kvstore""" )
        curr_real_layer_name = """""".join(split_layer[0] )[:-1]
        split_layer = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )]
    else:
        split_layer = layer.split("""/""" )
        curr_real_layer_name = """/""".join(split_layer[:-1] )
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
    elif "kvstore/driver" in layer:
        content = """file"""
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def UpperCamelCase (current_block , save_path ):
    """Rename a shard's keys to the transformers convention and save it.

    Applies ``rename_keys`` (imported from the small conversion script), then
    converts "/"-separated checkpoint keys to "."-separated state-dict keys
    and writes the result with ``torch.save``.

    NOTE(review): the previous revision gave both parameters the same name
    (a SyntaxError) and dropped the dict-store of each renamed key into a
    throwaway local, saving an empty mapping.
    """
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        # Checkpoint keys use "/" separators; torch state dicts use ".".
        new_current_block[k.replace("""/""" , """.""" )] = v
    torch.save(new_current_block, save_path)
def UpperCamelCase (lowercase_: Dict , lowercase_: Optional[Any] , lowercase_: Optional[Any] , lowercase_: Optional[int] , lowercase_: str = WEIGHTS_NAME ) -> Tuple:
    # Shard a t5x Switch-Transformers checkpoint into max_shard_size torch
    # files and build the weights index (this is the `shard_on_the_fly`
    # implementation the __main__ block calls).
    # NOTE(review): all parameters share one name (`lowercase_` — SyntaxError)
    # and locals were collapsed to `A__`, while later lines still reference the
    # originals (switch_checkpoint_path, checkpoint_info, current_block,
    # weight_size, total_size, weights_name, shard_file, ...). Restore unique
    # names before this can run.
    A__ : Optional[int] = convert_file_size_to_int(lowercase_ )
    A__ : List[Any] = []
    A__ : int = {}
    A__ : List[str] = 0
    A__ : Any = 0
    os.makedirs(lowercase_ , exist_ok=lowercase_ )
    with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
        A__ : Optional[Any] = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
        A__ : Dict = flatten_dict(lowercase_ , sep="""/""" )
    A__ : Any = {}
    # Group every tensorstore sub-key under its real layer name.
    for layer in checkpoint_info.keys():
        A__ , A__ , A__ : Union[str, Any] = get_key_and_tensorstore_dict(
            lowercase_ , lowercase_ , lowercase_ )
        if curr_real_layer_name in all_layers:
            A__ : Optional[int] = content
        else:
            A__ : List[Any] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        A__ : Optional[Any] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
        A__ : List[Any] = torch.tensor(lowercase_ )
        A__ : List[Any] = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
        # use the renaming pattern from the small conversion scripts
        A__ , A__ : Any = rename_base_flax_keys(tuple(key.split("""/""" ) ) , lowercase_ )
        A__ : Any = """/""".join(lowercase_ )
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            A__ : List[Any] = os.path.join(
                lowercase_ , weights_name.replace(""".bin""" , f"""-{len(lowercase_ )+1:05d}-of-???.bin""" ) )
            rename_and_save_block(lowercase_ , lowercase_ )
            sharded_state_dicts.append(current_block.keys() )
            del current_block
            A__ : Any = {}
            A__ : str = 0
        A__ : List[str] = raw_weights.to(getattr(lowercase_ , lowercase_ ) )
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    A__ : Union[str, Any] = os.path.join(lowercase_ , weights_name.replace(""".bin""" , f"""-{len(lowercase_ )+1:05d}-of-???.bin""" ) )
    rename_and_save_block(lowercase_ , lowercase_ )
    sharded_state_dicts.append(current_block.keys() )
    # If we only have one shard, we return it
    if len(lowercase_ ) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    A__ : str = {}
    A__ : Any = {}
    for idx, shard in enumerate(lowercase_ ):
        A__ : Any = weights_name.replace(
            """.bin""" , f"""-{idx+1:05d}-of-{len(lowercase_ ):05d}.bin""" ) # len(sharded_state_dicts):05d}
        A__ : Dict = os.path.join(lowercase_ , weights_name.replace(""".bin""" , f"""-{idx+1:05d}-of-???.bin""" ) )
        os.rename(lowercase_ , os.path.join(lowercase_ , lowercase_ ) )
        A__ : str = shard
        for key in shard:
            A__ : Any = shard_file
    # Add the metadata
    A__ : Tuple = {"""total_size""": total_size}
    A__ : Union[str, Any] = {"""metadata""": metadata, """weight_map""": weight_map}
    with open(os.path.join(lowercase_ , lowercase_ ) , """w""" , encoding="""utf-8""" ) as f:
        A__ : Dict = json.dumps(lowercase_ , indent=2 , sort_keys=lowercase_ ) + """\n"""
        f.write(lowercase_ )
    return metadata, index
if __name__ == "__main__":
    # CLI entry point: parse conversion arguments and shard the checkpoint.
    # NOTE(review): restored `parser`/`args` bindings (the previous revision
    # assigned both to `A_`, so the references below raised NameError) and
    # fixed `args.switch_tax_checkpoint_path` -> `switch_t5x_checkpoint_path`
    # (argparse derives the attribute name from the `--switch_t5x_...` flag).
    # `shard_on_the_fly` is defined above but was renamed by the mechanical
    # rewrite — confirm it is exported under this name.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--switch_t5x_checkpoint_path',
        default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
        type=str,
        required=False,
        help='Path to a directory containing a folder per layer. Follows the original Google format.',
    )
    parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
    parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
        type=str,
        required=False,
        help='Path to the output pytorch model.',
    )
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def UpperCamelCase () -> int:
    # Manual sanity check: round-trip google/switch-base-8 through the
    # converted folder and generate from a fill-in-the-blank prompt.
    # NOTE(review): `TaTokenizer` looks like a garbled `T5Tokenizer` — confirm.
    # Locals were renamed to `A__` while later lines reference the originals
    # (`config`, `model`, `tokenizer`, `out`) and `lowercase_` is undefined
    # here (should be the prompt / input_ids). Restore before use. Hard-coded
    # home-directory paths make this developer-machine-only.
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer

    A__ : str = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
    config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
    A__ : str = SwitchTransformersForConditionalGeneration.from_pretrained(
        """/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
    A__ : Tuple = TaTokenizer.from_pretrained("""t5-small""" )
    A__ : Dict = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
    A__ : Union[str, Any] = tokenizer(lowercase_ , return_tensors="""pt""" ).input_ids
    A__ : Tuple = model.generate(lowercase_ , decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
| 64 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# NOTE(review): these module constants were all renamed to `A_` by a
# mechanical rewrite, but the tokenizer class below reads them under their
# original names (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES). Restore those names before use.
A_ : Tuple = logging.get_logger(__name__)  # module-level logger
# File names expected inside a saved GPT-2 tokenizer directory.
A_ : Optional[int] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# Canonical download URLs per pretrained GPT-2 checkpoint.
A_ : List[str] = {
    'vocab_file': {
        'gpt2': 'https://huggingface.co/gpt2/resolve/main/vocab.json',
        'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/vocab.json',
        'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/vocab.json',
        'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/vocab.json',
        'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/vocab.json',
    },
    'merges_file': {
        'gpt2': 'https://huggingface.co/gpt2/resolve/main/merges.txt',
        'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/merges.txt',
        'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/merges.txt',
        'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/merges.txt',
        'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'gpt2': 'https://huggingface.co/gpt2/resolve/main/tokenizer.json',
        'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json',
        'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/tokenizer.json',
        'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json',
        'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/tokenizer.json',
    },
}
# Maximum sequence length per checkpoint.
A_ : int = {
    'gpt2': 1024,
    'gpt2-medium': 1024,
    'gpt2-large': 1024,
    'gpt2-xl': 1024,
    'distilgpt2': 1024,
}
class _a (__magic_name__ ):
    """Fast, `tokenizers`-backed GPT-2 byte-level BPE tokenizer.

    Construction (re)configures the Rust backend's pre-tokenizer so that its
    ``add_prefix_space`` setting matches the value requested by the caller.
    """

    UpperCAmelCase__: str = VOCAB_FILES_NAMES
    UpperCAmelCase__: List[Any] = PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase__: Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase__: List[Any] = ['''input_ids''', '''attention_mask''']
    UpperCAmelCase__: Tuple = GPTaTokenizer

    def __init__( self , A__=None , A__=None , A__=None , A__="<|endoftext|>" , A__="<|endoftext|>" , A__="<|endoftext|>" , A__=False , **A__ , ):
        # NOTE(review): parameter names were lost by the renamer; positionally they
        # look like vocab_file, merges_file, tokenizer_file, unk/bos/eos tokens and
        # add_prefix_space — confirm against the slow GPTaTokenizer signature.
        super().__init__(
            A__ , A__ , tokenizer_file=A__ , unk_token=A__ , bos_token=A__ , eos_token=A__ , add_prefix_space=A__ , **A__ , )
        A__ : Union[str, Any] = kwargs.pop("""add_bos_token""" , A__ )
        # Rebuild the backend pre-tokenizer from its serialized state when its
        # add_prefix_space flag disagrees with the requested one.
        A__ : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , A__ ) != add_prefix_space:
            A__ : Union[str, Any] = getattr(A__ , pre_tok_state.pop("""type""" ) )
            A__ : Tuple = add_prefix_space
            A__ : Dict = pre_tok_class(**A__ )
        A__ : Dict = add_prefix_space

    def __A ( self , *A__ , **A__ ):
        # Batch encode; pretokenized input is only valid with add_prefix_space=True.
        A__ : Any = kwargs.get("""is_split_into_words""" , A__ )
        assert self.add_prefix_space or not is_split_into_words, (
            F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*A__ , **A__ )

    def __A ( self , *A__ , **A__ ):
        # Single-example encode; same add_prefix_space requirement as above.
        A__ : Optional[Any] = kwargs.get("""is_split_into_words""" , A__ )
        assert self.add_prefix_space or not is_split_into_words, (
            F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*A__ , **A__ )

    def __A ( self , A__ , A__ = None ):
        # Save the backend vocabulary/merges files into the given directory.
        A__ : Optional[int] = self._tokenizer.model.save(A__ , name=A__ )
        return tuple(A__ )

    def __A ( self , A__ ):
        # Flatten a Conversation into input ids, appending EOS after every turn and
        # keeping only the last `model_max_length` tokens (truncate from the left).
        A__ : Optional[Any] = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(A__ , add_special_tokens=A__ ) + [self.eos_token_id] )
        if len(A__ ) > self.model_max_length:
            A__ : str = input_ids[-self.model_max_length :]
        return input_ids
| 703 |
# Lazy-loading module stub for the BARTpho tokenizer: the sentencepiece-backed
# tokenizer is imported only on first attribute access.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available

# Maps submodule name -> public names it provides; consumed by _LazyModule.
# (The degraded version assigned this to a throwaway name, so the
# `_import_structure` reference below raised NameError at import time.)
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    # Replace this module with the lazy proxy so attribute access triggers the
    # real import.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 64 | 0 |
# Repository hygiene check: reject file names with uppercase letters, spaces,
# hyphens, or files living at the repository root. Exits non-zero (with the
# number of offending files) so CI fails.
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

# The degraded version assigned every result to `A_` while reading the real
# names below; restore the names so the reads resolve.
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"""{len(upper_files)} files contain uppercase characters:""")
    print('\n'.join(upper_files) + '\n')

space_files = [file for file in filepaths if ' ' in file]
if space_files:
    print(f"""{len(space_files)} files contain space characters:""")
    print('\n'.join(space_files) + '\n')

hyphen_files = [file for file in filepaths if '-' in file]
if hyphen_files:
    print(f"""{len(hyphen_files)} files contain hyphen characters:""")
    print('\n'.join(hyphen_files) + '\n')

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"""{len(nodir_files)} files are not in a directory:""")
    print('\n'.join(nodir_files) + '\n')

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 704 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
A_ : Dict = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def remove_ignore_keys_(state_dict):
    """Drop top-level entries of the raw checkpoint that must not be copied.

    Mutates `state_dict` in place; missing keys are ignored (pop default None).
    (Restored: the degraded body referenced an undefined `state_dict` and popped
    the wrong names; the name is also restored to match its call site.)
    """
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
# Substring replacements mapping OpenAI Whisper parameter-name fragments to
# their HuggingFace equivalents; applied by `rename_keys` below.
WHISPER_MAPPING = {
    'blocks': 'layers',
    'mlp.0': 'fc1',
    'mlp.2': 'fc2',
    'mlp_ln': 'final_layer_norm',
    '.attn.query': '.self_attn.q_proj',
    '.attn.key': '.self_attn.k_proj',
    '.attn.value': '.self_attn.v_proj',
    '.attn_ln': '.self_attn_layer_norm',
    '.attn.out': '.self_attn.out_proj',
    '.cross_attn.query': '.encoder_attn.q_proj',
    '.cross_attn.key': '.encoder_attn.k_proj',
    '.cross_attn.value': '.encoder_attn.v_proj',
    '.cross_attn_ln': '.encoder_attn_layer_norm',
    '.cross_attn.out': '.encoder_attn.out_proj',
    'decoder.ln.': 'decoder.layer_norm.',
    'encoder.ln.': 'encoder.layer_norm.',
    'token_embedding': 'embed_tokens',
    'encoder.positional_embedding': 'encoder.embed_positions.weight',
    'decoder.positional_embedding': 'decoder.embed_positions.weight',
    'ln_post': 'layer_norm',
}


def rename_keys(s_dict):
    """Rename every key of `s_dict` according to WHISPER_MAPPING (in place).

    Each mapping fragment found in the ORIGINAL key is substituted in the new
    key; the renamed entry replaces the old one. Returns the same dict.
    """
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"""{key} -> {new_key}""")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    """Build a bias-free Linear layer sharing the weights of embedding `emb`.

    Used to tie the output projection to the decoder token embeddings.
    (Restored: the degraded version passed the embedding module itself as the
    Linear dimensions and as `bias`, which raised a TypeError.)
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Direct .data assignment intentionally bypasses shape checks so the layer
    # shares the embedding tensor.
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str) -> bytes:
    """Download `url` into directory `root`, verify its SHA256, return the bytes.

    The expected checksum is the second-to-last path component of the URL.
    A valid cached file is returned without re-downloading.
    Raises RuntimeError if the target path is a non-file or the checksum fails.
    (Restored: the degraded signature reused one parameter name twice — a
    SyntaxError — and called the nonexistent `hashlib.shaaaa`.)
    """
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)

    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"""{download_target} exists and is not a regular file""")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("""Content-Length""")), ncols=80, unit="""iB""", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            """Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.""")

    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Convert an OpenAI Whisper checkpoint to a HF WhisperForConditionalGeneration.

    `checkpoint_path` is either a bare model name (looked up in _MODELS and
    downloaded) or a local `.pt` file. The converted model is saved to
    `pytorch_dump_folder_path`.
    """
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="""cpu""")
    dimensions = original_checkpoint["""dims"""]
    state_dict = original_checkpoint["""model_state_dict"""]
    proj_out_weights = state_dict["""decoder.token_embedding.weight"""]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    # Infer the feed-forward width from the first decoder FC layer.
    ffn_dim = state_dict["""decoder.layers.0.fc1.weight"""].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["""n_vocab"""],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["""n_mels"""],
        d_model=dimensions["""n_audio_state"""],
        max_target_positions=dimensions["""n_text_ctx"""],
        encoder_layers=dimensions["""n_audio_layer"""],
        encoder_attention_heads=dimensions["""n_audio_head"""],
        decoder_layers=dimensions["""n_text_layer"""],
        # NOTE(review): "n_text_state" is kept from the source, but it looks like
        # a model width, not a head count — "n_text_head" would be the matching
        # key. Confirm against the checkpoint's "dims" before relying on this.
        decoder_attention_heads=dimensions["""n_text_state"""],
        max_source_positions=dimensions["""n_audio_ctx"""],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            """Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
            f""" but all the following weights are missing {missing}""")

    if tie_embeds:
        # Tie the output projection to the decoder input embeddings.
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse paths and run the conversion.
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Path to the downloaded checkpoints')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    # Restored: the result was assigned to a throwaway name while `args` was read.
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 64 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A_ : Tuple = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Build (old, new) name pairs mapping timm hybrid-ViT weights to HF names.

    With base_model=True the pairs target the bare ViTHybridModel: the "vit."
    prefix is stripped and the pooler head replaces the classifier head.
    (Restored: the degraded signature reused a parameter name — SyntaxError —
    and the result list was never bound to the name the body appends to.)
    """
    rename_keys = []

    # fmt: off
    # stem:
    rename_keys.append(("cls_token", "vit.embeddings.cls_token"))
    rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings"))

    rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"))

    # backbone
    rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight"))
    rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight"))
    rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias"))

    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))
        # Each stage's first block carries the downsampling projection.
        rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
        rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
        rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))

    # transformer encoder
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ])

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ])
    # fmt: on
    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split timm's fused qkv projection into separate HF query/key/value entries.

    Mutates `state_dict` in place: pops `blocks.{i}.attn.qkv.{weight,bias}` and
    writes query/key/value weights and biases under HF names (prefixed with
    "vit." unless `base_model`).
    (Restored: the degraded signature reused a parameter name — SyntaxError —
    and all six assignment targets had been lost. Target key names follow the
    standard HF ViT conversion layout — confirm against the modeling code.)
    """
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Delete the timm classification head weights from `state_dict` (in place).

    Used when converting to the bare (headless) model; missing keys are ignored.
    """
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move the value stored at key `old` to key `new` (mutates `dct`)."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Fetch the standard COCO sanity-check image (two cats on a couch).

    (Restored: the degraded version never bound `im`, so the `return im`
    raised NameError.)
    """
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak a timm hybrid-ViT checkpoint into the HF ViTHybrid format,
    verify pixel values and logits against the timm model, and optionally save
    and/or push the result.
    (Restored from the degraded version, whose signature reused one parameter
    name — a SyntaxError — and whose local assignment targets were all lost.)
    """
    # Hard-coded BiT R50 backbone + ViT-base config matching vit_base_r50_s16_384.
    backbone_config = BitConfig(
        global_padding="""same""", layer_type="""bottleneck""", depths=(3, 4, 9), out_features=["""stage3"""], embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys_list = create_rename_keys(config, base_model)
    for src, dest in rename_keys_list:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # ImageNet-1k label mapping for the classification head.
    repo_id = """huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor mirroring timm's eval transform
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        """bilinear""": PILImageResampling.BILINEAR,
        """bicubic""": PILImageResampling.BICUBIC,
        """nearest""": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"""shortest_edge""": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="""pt""").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("""Predicted class:""", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("""Looks ok!""")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"""Saving processor to {pytorch_dump_folder_path}""")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"""Pushing model and processor to the hub {vit_name}""")
        model.push_to_hub(f"""ybelkada/{vit_name}""")
        processor.push_to_hub(f"""ybelkada/{vit_name}""")
if __name__ == "__main__":
    # CLI entry point for the ViT-hybrid conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--vit_name',
        default='vit_base_r50_s16_384',
        type=str,
        help='Name of the hybrid ViT timm model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
    )

    # Restored: the result was assigned to a throwaway name while `args` was read.
    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 705 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _a (__magic_name__ , unittest.TestCase ):
    """Fast sanity tests for TextToVideoSDPipeline built from tiny random components."""

    UpperCAmelCase__: Any = TextToVideoSDPipeline
    UpperCAmelCase__: Any = TEXT_TO_IMAGE_PARAMS
    UpperCAmelCase__: Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    UpperCAmelCase__: Optional[int] = frozenset(
        [
            '''num_inference_steps''',
            '''generator''',
            '''latents''',
            '''return_dict''',
            '''callback''',
            '''callback_steps''',
        ] )

    def __A ( self ):
        # Assemble minimal pipeline components (3D UNet, DDIM scheduler, tiny VAE,
        # tiny CLIP text encoder + tokenizer), each deterministically seeded.
        torch.manual_seed(0 )
        A__ : Optional[int] = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
        A__ : Optional[int] = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A__ , set_alpha_to_one=A__ , )
        torch.manual_seed(0 )
        A__ : Union[str, Any] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        A__ : Union[str, Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
        A__ : Union[str, Any] = CLIPTextModel(A__ )
        A__ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        A__ : Dict = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
        }
        return components

    def __A ( self , A__ , A__=0 ):
        # Deterministic pipeline inputs; MPS requires a CPU-seeded generator.
        if str(A__ ).startswith("""mps""" ):
            A__ : Tuple = torch.manual_seed(A__ )
        else:
            A__ : List[str] = torch.Generator(device=A__ ).manual_seed(A__ )
        A__ : List[str] = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """pt""",
        }
        return inputs

    def __A ( self ):
        # End-to-end smoke test on CPU: check frame shape and a reference pixel slice.
        A__ : List[str] = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        A__ : Union[str, Any] = self.get_dummy_components()
        A__ : Union[str, Any] = TextToVideoSDPipeline(**A__ )
        A__ : int = sd_pipe.to(A__ )
        sd_pipe.set_progress_bar_config(disable=A__ )
        A__ : int = self.get_dummy_inputs(A__ )
        A__ : int = """np"""
        A__ : Any = sd_pipe(**A__ ).frames
        A__ : Dict = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        A__ : Optional[Any] = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def __A ( self ):
        # Attention slicing must not change outputs beyond a small tolerance.
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A__ , expected_max_diff=3e-3 )

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def __A ( self ):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A__ , expected_max_diff=1e-2 )

    @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
    def __A ( self ):
        pass

    @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
    def __A ( self ):
        pass

    @unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
    def __A ( self ):
        pass

    def __A ( self ):
        # Progress-bar behaviour is covered by the shared mixin implementation.
        return super().test_progress_bar()
@slow
@skip_mps
class _a (unittest.TestCase ):
    """Slow integration tests: run the real damo-vilab text-to-video model on CUDA
    and compare the generated frames against stored reference videos."""

    def __A ( self ):
        # 25-step generation with the DPMSolver multistep scheduler.
        A__ : Union[str, Any] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
        A__ : Tuple = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
        A__ : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        A__ : int = pipe.to("""cuda""" )

        A__ : Optional[Any] = """Spiderman is surfing"""
        A__ : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )

        A__ : Optional[Any] = pipe(A__ , generator=A__ , num_inference_steps=25 , output_type="""pt""" ).frames
        A__ : Dict = video_frames.cpu().numpy()

        # Mean absolute difference against the stored reference video.
        assert np.abs(expected_video - video ).mean() < 5e-2

    def __A ( self ):
        # 2-step generation with the pipeline's default scheduler.
        A__ : List[Any] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
        A__ : Optional[int] = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
        A__ : List[str] = pipe.to("""cuda""" )

        A__ : Dict = """Spiderman is surfing"""
        A__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )

        A__ : Optional[int] = pipe(A__ , generator=A__ , num_inference_steps=2 , output_type="""pt""" ).frames
        A__ : Optional[int] = video_frames.cpu().numpy()

        assert np.abs(expected_video - video ).mean() < 5e-2
| 64 | 0 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# (Restored: the path was assigned to a throwaway name while `git_repo_path`
# was read on the next line.)
git_repo_path = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_configure(config):
    """pytest hook: register the repo's custom markers so `-m` selection works.

    (Restored: the hook only fires under its canonical name, and the body read
    `config` while the parameter had been renamed away.)
    """
    config.addinivalue_line(
        """markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" )
    config.addinivalue_line(
        """markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" )
    config.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" )
    config.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" )
    config.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" )
    config.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" )
def pytest_addoption(parser):
    """pytest hook: delegate CLI-option registration to the shared helper."""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """pytest hook: emit the custom report files when --make-reports is set."""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("""--make-reports""")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    """pytest hook: treat 'no tests collected' (exit code 5) as success.

    (Restored: the degraded signature reused one parameter name — a
    SyntaxError — and the `session.exitstatus` target had been lost.)
    """
    # If no tests are collected, pytest exists with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('IGNORE_RESULT')

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    """Doctest checker that accepts any output for examples flagged IGNORE_RESULT."""

    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


# Install the custom checker and the HF doctest collection/parsing hooks.
# (Restored: the degraded version assigned all of these to throwaway names, so
# none of the patches were actually applied.)
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 706 |
def UpperCamelCase(number: int) -> int:
    """Return the 1-based position of the highest set bit of `number`.

    Equivalent to ``number.bit_length()`` for non-negative inputs; returns 0
    for 0. Raises TypeError for non-int input and ValueError for negatives
    (right-shifting a negative int never reaches 0, which previously looped
    forever). The original also called ``isinstance(x, x)``, which raised a
    TypeError for every input.
    """
    if not isinstance(number, int):
        raise TypeError("""Input value must be an 'int' type""")
    if number < 0:
        raise ValueError("Input value must be non-negative")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
    import doctest

    # Run any examples embedded in this module's docstrings as tests.
    doctest.testmod()
| 64 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Union[str, Any] = logging.get_logger(__name__)
A_ : Optional[Any] = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class _a (PretrainedConfig ):
    """Configuration class for XLM-RoBERTa models.

    Stores the hyper-parameters that define the transformer architecture; all
    arguments default to the xlm-roberta-base values.
    (Restored: the degraded __init__ reused `A__` for every parameter — a
    SyntaxError — and the `model_type` class attribute had lost its name.)
    """

    model_type = '''xlm-roberta'''

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class _a (__magic_name__ ):
    """ONNX export configuration: declares the dynamic axes of the model inputs."""

    @property
    def __A ( self ):
        # FIX: the axis mapping was assigned to a throwaway local while the
        # OrderedDict below read the undefined name ``dynamic_axis``; the
        # mapping is now bound to the name the body uses.
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
| 707 |
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def UpperCamelCase (lowercase_: np.ndarray , src_points: np.ndarray , dst_points: np.ndarray , rows: int , cols: int ) -> np.ndarray:
    """Warp ``lowercase_`` with the affine map taking ``src_points`` to ``dst_points``.

    FIX: all five parameters were mangled to the same name (a SyntaxError) and
    the output size read the undefined names ``rows``/``cols``; distinct
    parameter names are restored from the body's usage.

    Args:
        lowercase_: input image (mangled name kept for callers).
        src_points: three source points, float32 array of shape (3, 2).
        dst_points: the three corresponding destination points.
        rows: height of the output image.
        cols: width of the output image.

    Returns:
        The affinely warped image of size (rows, cols).
    """
    # Build the 2x3 affine matrix from the two point triples, then apply it.
    A__ : Any = cva.getAffineTransform(src_points , dst_points )
    return cva.warpAffine(lowercase_ , A__ , (rows, cols) )
if __name__ == "__main__":
    # Demo: rotate a grayscale Lena image three ways and plot the results.
    # NOTE(review): this block was machine-mangled — every variable was bound
    # to ``A_`` while later lines read the original names (``image``,
    # ``gray_img``, ``ptsa``, ``img_rows``, ``img_cols``, ``images``,
    # ``titles``), ``get_rotation`` no longer exists under that name, and the
    # annotated tuple assignment below is a SyntaxError.  Code left
    # byte-identical; commentary only.
    # read original image
    A_ : List[Any] = cva.imread(
        str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
    )
    # turn image in gray scale value
    A_ : List[Any] = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    A_ , A_ : Optional[Any] = gray_img.shape
    # set different points to rotate image
    A_ : str = np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
    A_ : Dict = np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
    A_ : Optional[int] = np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
    A_ : Optional[int] = np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
    # add all rotated images in a list
    A_ : Dict = [
        gray_img,
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
    ]
    # plot different image rotations
    A_ : Union[str, Any] = plt.figure(1)
    A_ : Union[str, Any] = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
        plt.title(titles[i])
        plt.axis('off')
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 64 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
# Canonical ALBERT checkpoints mapped to their hosted config.json files.
# NOTE(review): the map's original name was mangled to ``A_``.
A_ = {
    'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
    'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
    'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
    'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
    'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
    'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
    'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
    'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class _a (__magic_name__ ):
    """Configuration class for ALBERT models.

    FIX: every ``__init__`` parameter was mangled to the duplicate name ``A__``
    (a SyntaxError) and each attribute assignment was written to a throwaway
    local.  The parameter names are restored from the attribute assignments in
    the body (defaults unchanged) and the values are stored on ``self`` again.
    """

    UpperCAmelCase__: str = '''albert'''

    def __init__( self , vocab_size=3_0000 , embedding_size=128 , hidden_size=4096 , num_hidden_layers=12 , num_hidden_groups=1 , num_attention_heads=64 , intermediate_size=1_6384 , inner_group_num=1 , hidden_act="gelu_new" , hidden_dropout_prob=0 , attention_probs_dropout_prob=0 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , classifier_dropout_prob=0.1 , position_embedding_type="absolute" , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , **kwargs , ):
        # Special-token ids are consumed by the base configuration class.
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class _a (__magic_name__ ):
    """ONNX export configuration: declares the dynamic axes of the model inputs."""

    @property
    def __A ( self ):
        # FIX: the axis mapping was assigned to a throwaway local while the
        # OrderedDict below read the undefined name ``dynamic_axis``; the
        # mapping is now bound to the name the body uses.
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
                ("""token_type_ids""", dynamic_axis),
            ] )
| 708 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _a (unittest.TestCase ):
    """Benchmark smoke tests: run ``PyTorchBenchmark`` on tiny checkpoints and
    check that the timing/memory result dicts are populated.

    NOTE(review): this block was machine-mangled — every method was renamed to
    ``__A`` (so each later definition shadows the previous one), local names
    were collapsed to ``A__``, and the bodies read names that are undefined as
    written (``results``, ``MODEL_ID``, ``benchmark``, ``config``, ``tmp_dir``,
    ``result``).  The code is kept byte-identical; only commentary was added.
    """

    # Originally ``check_results_dict_not_empty``: every (batch, seq) cell must hold a result.
    def __A ( self , A__ ):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
                A__ : str = model_result["""result"""][batch_size][sequence_length]
                self.assertIsNotNone(A__ )

    # Inference benchmark on a tiny GPT-2 checkpoint.
    def __A ( self ):
        A__ : Dict = """sshleifer/tiny-gpt2"""
        A__ : Tuple = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : int = PyTorchBenchmark(A__ )
        A__ : List[Any] = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    # Inference benchmark with ``only_pretrain_model`` enabled.
    def __A ( self ):
        A__ : Dict = """sgugger/tiny-distilbert-classification"""
        A__ : Dict = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , only_pretrain_model=A__ , )
        A__ : str = PyTorchBenchmark(A__ )
        A__ : List[str] = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    # Inference benchmark with torchscript enabled.
    def __A ( self ):
        A__ : Any = """sshleifer/tiny-gpt2"""
        A__ : List[Any] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , torchscript=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : Tuple = PyTorchBenchmark(A__ )
        A__ : str = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    # Half-precision inference benchmark (GPU only).
    @unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
    def __A ( self ):
        A__ : Optional[Any] = """sshleifer/tiny-gpt2"""
        A__ : Optional[Any] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , fpaa=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : str = PyTorchBenchmark(A__ )
        A__ : Any = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    # Inference benchmark with a config whose ``architectures`` is None.
    def __A ( self ):
        A__ : Optional[Any] = """sshleifer/tiny-gpt2"""
        A__ : Tuple = AutoConfig.from_pretrained(A__ )
        # set architectures equal to `None`
        A__ : List[Any] = None
        A__ : str = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : List[str] = PyTorchBenchmark(A__ , configs=[config] )
        A__ : Any = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    # Training benchmark on a tiny GPT-2 checkpoint.
    def __A ( self ):
        A__ : Optional[int] = """sshleifer/tiny-gpt2"""
        A__ : Optional[int] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : Any = PyTorchBenchmark(A__ )
        A__ : Dict = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    # Half-precision training benchmark (GPU only).
    @unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
    def __A ( self ):
        A__ : Optional[int] = """sshleifer/tiny-gpt2"""
        A__ : List[str] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=A__ , multi_process=A__ , )
        A__ : Dict = PyTorchBenchmark(A__ )
        A__ : Optional[int] = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    # Inference benchmark driven by an explicit config object.
    def __A ( self ):
        A__ : int = """sshleifer/tiny-gpt2"""
        A__ : Optional[int] = AutoConfig.from_pretrained(A__ )
        A__ : str = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : int = PyTorchBenchmark(A__ , configs=[config] )
        A__ : Tuple = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    # Encoder-decoder (tiny BART) inference benchmark with an explicit config.
    def __A ( self ):
        A__ : List[str] = """sshleifer/tinier_bart"""
        A__ : List[str] = AutoConfig.from_pretrained(A__ )
        A__ : List[str] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : Union[str, Any] = PyTorchBenchmark(A__ , configs=[config] )
        A__ : str = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    # Training benchmark driven by an explicit config object.
    def __A ( self ):
        A__ : Optional[int] = """sshleifer/tiny-gpt2"""
        A__ : Union[str, Any] = AutoConfig.from_pretrained(A__ )
        A__ : Tuple = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : int = PyTorchBenchmark(A__ , configs=[config] )
        A__ : Tuple = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    # Encoder-decoder (tiny BART) training benchmark with an explicit config.
    def __A ( self ):
        A__ : Dict = """sshleifer/tinier_bart"""
        A__ : int = AutoConfig.from_pretrained(A__ )
        A__ : Union[str, Any] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : List[Any] = PyTorchBenchmark(A__ , configs=[config] )
        A__ : Optional[Any] = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    # CSV export: every report file must exist after the run.
    def __A ( self ):
        A__ : int = """sshleifer/tiny-gpt2"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            A__ : Dict = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=A__ , inference=A__ , save_to_csv=A__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(A__ , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(A__ , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(A__ , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(A__ , """train_time.csv""" ) , env_info_csv_file=os.path.join(A__ , """env.csv""" ) , multi_process=A__ , )
            A__ : Optional[Any] = PyTorchBenchmark(A__ )
            benchmark.run()
            self.assertTrue(Path(os.path.join(A__ , """inf_time.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(A__ , """train_time.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(A__ , """inf_mem.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(A__ , """train_mem.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(A__ , """env.csv""" ) ).exists() )

    # Line-by-line memory tracing: the summaries must be populated and logged.
    def __A ( self ):
        A__ : Optional[int] = """sshleifer/tiny-gpt2"""
        def _check_summary_is_not_empty(A__ ):
            self.assertTrue(hasattr(A__ , """sequential""" ) )
            self.assertTrue(hasattr(A__ , """cumulative""" ) )
            self.assertTrue(hasattr(A__ , """current""" ) )
            self.assertTrue(hasattr(A__ , """total""" ) )
        with tempfile.TemporaryDirectory() as tmp_dir:
            A__ : Dict = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(A__ , """log.txt""" ) , log_print=A__ , trace_memory_line_by_line=A__ , multi_process=A__ , )
            A__ : Dict = PyTorchBenchmark(A__ )
            A__ : str = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            _check_summary_is_not_empty(result.train_summary )
            self.assertTrue(Path(os.path.join(A__ , """log.txt""" ) ).exists() )
| 64 | 0 |
from __future__ import annotations
def UpperCamelCase (target: str , word_bank: list[str] | None = None ) -> list[list[str]]:
    """Return every way to build ``target`` by concatenating words from ``word_bank``.

    FIX: both parameters were mangled to the same name (a SyntaxError) and the
    body read the original names ``target``/``word_bank``; the names are
    restored from the body's usage.

    Args:
        target: the string to construct.
        word_bank: words that may be concatenated (reused freely); defaults to [].

    Returns:
        A list of word sequences; each sequence concatenates to ``target``.
    """
    word_bank = word_bank or []
    # table[i] holds all word sequences that exactly build target[:i]
    table_size = len(target ) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size ):
        table.append([] )
    # seed value: the empty prefix has exactly one (empty) construction
    table[0] = [[]]
    # iterate through the indices
    for i in range(table_size ):
        if table[i] != []:
            for word in word_bank:
                # slice condition: does ``word`` start at position i of target?
                if target[i : i + len(word )] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # push each extended combination to where the word ends
                    table[i + len(word )] += new_combinations
    # combinations are built in reverse order, so flip them for readable output
    for combination in table[len(target )]:
        combination.reverse()
    return table[len(target )]
if __name__ == "__main__":
    # Demo runs of the all-construct dynamic-programming routine.
    # NOTE(review): ``all_construct`` is undefined here — the function above
    # was renamed ``UpperCamelCase`` during mangling; confirm the name.
    print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
    print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
    print(
        all_construct(
            'hexagonosaurus',
            ['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
        )
    )
| 709 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
A_ : Optional[int] = abspath(join(dirname(__file__), 'src'))
# NOTE(review): the computed path is bound to the mangled name ``A_`` but the
# insert below reads ``git_repo_path``, which is undefined here — presumably
# the original variable name; confirm before running.
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def UpperCamelCase (lowercase_: List[str] ) -> Any:
    """Pytest ``configure`` hook body: register the project's custom markers.

    FIX: the body called methods on the undefined name ``config``; the mangled
    parameter ``lowercase_`` is the config object and is used instead.

    Args:
        lowercase_: the pytest ``Config`` object (mangled name kept for callers).
    """
    lowercase_.addinivalue_line(
        """markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" )
    lowercase_.addinivalue_line(
        """markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" )
    lowercase_.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" )
    lowercase_.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" )
    lowercase_.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" )
    lowercase_.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" )
def UpperCamelCase (lowercase_: Optional[int] ) -> Optional[Any]:
    """Pytest ``addoption`` hook body: delegate option registration to the
    shared helper from transformers' testing utilities."""
    from transformers.testing_utils import pytest_addoption_shared as add_shared_options

    add_shared_options(lowercase_ )
def UpperCamelCase (lowercase_: List[str] ) -> Optional[Any]:
    """Pytest ``terminal_summary`` hook body: emit report files when requested.

    FIX: the body read the undefined names ``terminalreporter`` and
    ``make_reports``; the mangled parameter is the terminal reporter and the
    queried option value is now kept and forwarded as the report id.
    """
    from transformers.testing_utils import pytest_terminal_summary_main

    # value of the --make-reports option (falsy when reports are disabled)
    make_reports = lowercase_.config.getoption("""--make-reports""" )
    if make_reports:
        pytest_terminal_summary_main(lowercase_ , id=make_reports )
def UpperCamelCase (lowercase_: Union[str, Any] , exitstatus: int ) -> None:
    """Pytest ``sessionfinish`` hook body: treat "no tests collected" as success.

    FIX: both parameters were mangled to the same name (a SyntaxError), the
    body read the undefined name ``exitstatus``, and the reset status was
    written to a throwaway local.  The second parameter's name is restored and
    the status is written back onto the session object.

    Args:
        lowercase_: the pytest session object (mangled name kept for callers).
        exitstatus: pytest's exit status for the whole session.
    """
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        lowercase_.exitstatus = 0
# Doctest custom flag to ignore output.
# NOTE(review): both bindings below share the mangled name ``A_`` (the second
# overwrites the first); the class below reads ``IGNORE_RESULT`` and
# ``OutputChecker``, which are undefined as written — confirm the original
# names.
A_ : Tuple = doctest.register_optionflag('IGNORE_RESULT')
A_ : Dict = doctest.OutputChecker
class _a (__magic_name__ ):
    """Doctest output checker that honors the custom ``IGNORE_RESULT`` flag.

    FIX: the three checker parameters were all mangled to ``A__`` (duplicate
    argument names are a SyntaxError) while the body read the original name
    ``optionflags``; the distinct names are restored to match
    ``doctest.OutputChecker.check_output``'s signature.
    """

    def __A ( self , want , got , optionflags ):
        # When the custom flag is set, accept any output for the example.
        if IGNORE_RESULT & optionflags:
            return True
        # Otherwise defer to the stock doctest comparison.
        return OutputChecker.check_output(self , want , got , optionflags )
# Install the custom doctest checker and the HF doctest plumbing as
# module-level names.
# NOTE(review): ``CustomOutputChecker`` is undefined here (the class above was
# renamed ``_a``) and all three bindings share the mangled name ``A_``, so
# each assignment overwrites the previous one — confirm the intended names.
A_ : str = CustomOutputChecker
A_ : Dict = HfDoctestModule
A_ : Optional[int] = HfDocTestParser
| 64 | 0 |
def UpperCamelCase (denominations: list[int] , lowercase_: str ) -> list[int]:
    """Greedy change-making: denominations (largest first) summing to the value.

    FIX: both parameters were mangled to the same name (a SyntaxError) and the
    loop compared/mutated the wrong names; the denomination list gets its own
    name back and the running total is tracked in a local.

    Args:
        denominations: available coin/note values, assumed sorted ascending.
        lowercase_: the amount to change, as a numeric string or int.

    Returns:
        The denominations used, largest first.
    """
    total_value = int(lowercase_ )
    # Initialize Result
    answer: list[int] = []
    # Traverse denominations from largest to smallest
    for denomination in reversed(denominations ):
        # take this denomination as many times as it still fits
        while total_value >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # Append to the "answers" array
    return answer
# Driver Code
if __name__ == "__main__":
    # Interactive demo: read denominations (or use INR defaults) and print the
    # greedy minimal change for a user-supplied value.
    # NOTE(review): machine-mangled — the lists/values below are all bound to
    # ``A_`` while later lines read the original names (``n``,
    # ``denominations``, ``value``, ``answer``), and ``find_minimum_change``
    # no longer exists under that name.  Code left byte-identical.
    A_ : Optional[Any] = []
    A_ : Tuple = '0'
    if (
        input('Do you want to enter your denominations ? (yY/n): ').strip().lower()
        == "y"
    ):
        A_ : str = int(input('Enter the number of denominations you want to add: ').strip())
        for i in range(0, n):
            denominations.append(int(input(f'''Denomination {i}: ''').strip()))
        A_ : Any = input('Enter the change you want to make in Indian Currency: ').strip()
    else:
        # All denominations of Indian Currency if user does not enter
        A_ : Dict = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        A_ : Any = input('Enter the change you want to make: ').strip()
    if int(value) == 0 or int(value) < 0:
        print('The total value cannot be zero or negative.')
    else:
        print(f'''Following is minimal change for {value}: ''')
        A_ : List[str] = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=' ')
| 710 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _a :
    """Helper that builds tiny Pegasus configs and inputs for the TF model tests.

    NOTE(review): machine-mangled — ``__init__`` repeats the parameter name
    ``A__`` (duplicate argument names are a SyntaxError), both helper methods
    are named ``__A`` (the second shadows the first), and the bodies read names
    the mangling left undefined (``parent``, ``input_ids``, ``config``, ...).
    Code kept byte-identical; only commentary was added.
    """

    UpperCAmelCase__: List[Any] = PegasusConfig
    UpperCAmelCase__: Optional[int] = {}
    UpperCAmelCase__: List[str] = '''gelu'''

    def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=False , A__=99 , A__=32 , A__=2 , A__=4 , A__=37 , A__=0.1 , A__=0.1 , A__=40 , A__=2 , A__=1 , A__=0 , ):
        # Originally stored the tester's dimensions and special-token ids.
        A__ : Dict = parent
        A__ : Dict = batch_size
        A__ : Any = seq_length
        A__ : Optional[Any] = is_training
        A__ : int = use_labels
        A__ : Any = vocab_size
        A__ : Union[str, Any] = hidden_size
        A__ : Tuple = num_hidden_layers
        A__ : Tuple = num_attention_heads
        A__ : List[Any] = intermediate_size
        A__ : Union[str, Any] = hidden_dropout_prob
        A__ : Optional[Any] = attention_probs_dropout_prob
        A__ : List[Any] = max_position_embeddings
        A__ : Any = eos_token_id
        A__ : List[Any] = pad_token_id
        A__ : List[Any] = bos_token_id

    # Originally ``prepare_config_and_inputs_for_common``.
    def __A ( self ):
        A__ : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        A__ : Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        A__ : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
        A__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        A__ : Tuple = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        A__ : str = prepare_pegasus_inputs_dict(A__ , A__ , A__ )
        return config, inputs_dict

    # Originally ``check_decoder_model_past_large_inputs``: compare cached vs
    # uncached decoder outputs on a random logits slice.
    def __A ( self , A__ , A__ ):
        A__ : int = TFPegasusModel(config=A__ ).get_decoder()
        A__ : List[Any] = inputs_dict["""input_ids"""]
        A__ : Any = input_ids[:1, :]
        A__ : Optional[Any] = inputs_dict["""attention_mask"""][:1, :]
        A__ : Optional[int] = inputs_dict["""head_mask"""]
        A__ : Any = 1
        # first forward pass
        A__ : Tuple = model(A__ , attention_mask=A__ , head_mask=A__ , use_cache=A__ )
        A__ , A__ : Dict = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        A__ : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
        A__ : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        A__ : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
        A__ : Tuple = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        A__ : Optional[Any] = model(A__ , attention_mask=A__ )[0]
        A__ : Any = model(A__ , attention_mask=A__ , past_key_values=A__ )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        A__ : int = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        A__ : Any = output_from_no_past[:, -3:, random_slice_idx]
        A__ : Tuple = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(A__ , A__ , rtol=1e-3 )
def UpperCamelCase (config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    """Build the keyword-argument dict fed to TFPegasus models, deriving any
    masks that were not supplied.

    FIX: all eight parameters were mangled to the same name (a SyntaxError)
    while the body read the original names; the names are restored from the
    body's usage.  The mangled dtype ``tf.inta`` is restored to ``tf.int8``.

    Args:
        config: model config providing pad_token_id and layer/head counts.
        input_ids / decoder_input_ids: encoder and decoder token-id tensors.
        *_mask: optional masks; each defaults to "attend everywhere non-pad".
    """
    if attention_mask is None:
        # attend to every non-pad encoder token
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        # always attend to the first decoder token, then mask decoder padding
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class _a (__magic_name__ , __magic_name__ , unittest.TestCase ):
    """Common TF model-tester harness wired up for Pegasus.

    NOTE(review): machine-mangled — all three methods are named ``__A`` (later
    definitions shadow earlier ones) and the setUp body binds to a throwaway
    local while later code reads ``self.model_tester``/``self.config_tester``.
    Code kept byte-identical; only commentary was added.
    """

    UpperCAmelCase__: List[Any] = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    UpperCAmelCase__: Tuple = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    UpperCAmelCase__: Tuple = (
        {
            '''conversational''': TFPegasusForConditionalGeneration,
            '''feature-extraction''': TFPegasusModel,
            '''summarization''': TFPegasusForConditionalGeneration,
            '''text2text-generation''': TFPegasusForConditionalGeneration,
            '''translation''': TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    UpperCAmelCase__: int = True
    UpperCAmelCase__: Union[str, Any] = False
    UpperCAmelCase__: List[str] = False

    # Originally ``setUp``: build the model tester and config tester.
    def __A ( self ):
        A__ : Optional[Any] = TFPegasusModelTester(self )
        A__ : Tuple = ConfigTester(self , config_class=A__ )

    # Originally ``test_config``.
    def __A ( self ):
        self.config_tester.run_common_tests()

    # Originally ``test_decoder_model_past_large_inputs``.
    def __A ( self ):
        A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*A__ )
@require_sentencepiece
@require_tokenizers
@require_tf
class _a (unittest.TestCase ):
    """Slow integration test: summarize two XSUM articles with
    ``google/pegasus-xsum`` and compare against reference summaries.

    NOTE(review): machine-mangled — all four methods are named ``__A`` (later
    definitions shadow earlier ones) and bodies call helpers such as
    ``self.translate_src_text`` / ``self.tokenizer`` that no longer exist
    under those names.  Code kept byte-identical; only commentary was added.
    """

    UpperCAmelCase__: Optional[int] = [
        ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
        ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
    ]
    UpperCAmelCase__: Any = [
        '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
        ''' reduce the risk of wildfires.''',
        '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
    ] # differs slightly from pytorch, likely due to numerical differences in linear layers
    UpperCAmelCase__: List[str] = '''google/pegasus-xsum'''

    # Originally the cached ``tokenizer`` property.
    @cached_property
    def __A ( self ):
        return AutoTokenizer.from_pretrained(self.model_name )

    # Originally the cached ``model`` property.
    @cached_property
    def __A ( self ):
        A__ : int = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    # Originally the assertion helper comparing generated vs expected text.
    def __A ( self , **A__ ):
        A__ : str = self.translate_src_text(**A__ )
        assert self.expected_text == generated_words

    # Originally ``translate_src_text``: tokenize, generate with beam search, decode.
    def __A ( self , **A__ ):
        A__ : List[str] = self.tokenizer(self.src_text , **A__ , padding=A__ , return_tensors="""tf""" )
        A__ : Optional[int] = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=A__ , )
        A__ : Dict = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=A__ )
        return generated_words

    # Originally the slow end-to-end batch-generation test.
    @slow
    def __A ( self ):
        self._assert_generated_batch_equal_expected()
| 64 | 0 |
def UpperCamelCase (list_data: list , lowercase_: int = 0 ) -> list:
    """Recursive bubble sort: sort ``list_data`` in place and return it.

    FIX: both parameters were mangled to the same name (a SyntaxError), the
    swap collapsed into a single assignment that discarded one element, and
    the recursive call targeted the undefined name ``bubble_sort``; all three
    are repaired.

    Args:
        list_data: the list to sort (mutated in place).
        lowercase_: effective length of the unsorted prefix; 0 means the whole list.

    Returns:
        The sorted list (the same object that was passed in).
    """
    length = lowercase_ or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            # swap adjacent out-of-order elements
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    # the largest element has bubbled to the end; recurse on the shorter prefix
    return list_data if not swapped else UpperCamelCase(list_data , length - 1 )
if __name__ == "__main__":
    # Execute the module's doctests when run as a script.
    import doctest
    doctest.testmod()
class _a :
    """Edit (Levenshtein) distance solver with top-down and bottom-up DP.

    NOTE(review): machine-mangled — the three public methods were all renamed
    to ``__A`` (later definitions shadow earlier ones), several signatures
    repeat the parameter name ``A__`` (duplicate argument names are a
    SyntaxError), both input words are read as ``worda``, and the private
    recursion ``self.__min_dist_top_down_dp`` no longer exists under that
    mangled name.  Code kept byte-identical; only commentary was added.
    """

    def __init__( self ):
        # the two words being compared and the DP memo table
        A__ : str = """"""
        A__ : Any = """"""
        A__ : List[Any] = []

    # Originally the private top-down recursion over suffix indices (m, n):
    # dp[m][n] = edit distance between word1[:m+1] and word2[:n+1].
    def __A ( self , A__ , A__ ):
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.worda[m] == self.worda[n]:
                A__ : Optional[Any] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
            else:
                A__ : Union[str, Any] = self.__min_dist_top_down_dp(A__ , n - 1 )
                A__ : Union[str, Any] = self.__min_dist_top_down_dp(m - 1 , A__ )
                A__ : Union[str, Any] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
                A__ : List[Any] = 1 + min(A__ , A__ , A__ )
            return self.dp[m][n]

    # Originally ``min_dist_top_down(word1, word2)``: memoized entry point.
    def __A ( self , A__ , A__ ):
        A__ : Tuple = worda
        A__ : Dict = worda
        A__ : Optional[Any] = [[-1 for _ in range(len(A__ ) )] for _ in range(len(A__ ) )]
        return self.__min_dist_top_down_dp(len(A__ ) - 1 , len(A__ ) - 1 )

    # Originally ``min_dist_bottom_up(word1, word2)``: tabulated variant.
    def __A ( self , A__ , A__ ):
        A__ : Optional[Any] = worda
        A__ : Dict = worda
        A__ : Union[str, Any] = len(A__ )
        A__ : List[str] = len(A__ )
        A__ : int = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
        for i in range(m + 1 ):
            for j in range(n + 1 ):
                if i == 0: # first string is empty
                    A__ : Tuple = j
                elif j == 0: # second string is empty
                    A__ : Dict = i
                elif worda[i - 1] == worda[j - 1]: # last characters are equal
                    A__ : str = self.dp[i - 1][j - 1]
                else:
                    A__ : Union[str, Any] = self.dp[i][j - 1]
                    A__ : str = self.dp[i - 1][j]
                    A__ : Union[str, Any] = self.dp[i - 1][j - 1]
                    A__ : Tuple = 1 + min(A__ , A__ , A__ )
        return self.dp[m][n]
if __name__ == "__main__":
    # Interactive demo of both DP variants.
    # NOTE(review): ``EditDistance`` is undefined here (the class above was
    # renamed ``_a``), both input strings are bound to throwaway names, and
    # ``solver``/``Sa`` are undefined as written — the names were mangled.
    A_ : Union[str, Any] = EditDistance()
    print('****************** Testing Edit Distance DP Algorithm ******************')
    print()
    A_ : int = input('Enter the first string: ').strip()
    A_ : List[str] = input('Enter the second string: ').strip()
    print()
    print(f'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}''')
    print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}''')
    print()
    print('*************** End of Testing Edit Distance DP Algorithm ***************')
| 64 | 0 |
def UpperCamelCase (set_a , set_b , alternative_union=False ):
    """Jaccard similarity |A ∩ B| / |A ∪ B| of two sets (or lists/tuples).

    FIX: the three parameters were all mangled to the same name (a SyntaxError)
    and every ``isinstance``/length check compared the wrong names; the names
    are restored from the body's structure.

    Args:
        set_a: first collection; both inputs must be sets, or both list/tuple.
        set_b: second collection.
        alternative_union: when True use |A| + |B| as the denominator.

    Returns:
        The similarity ratio, or None for unsupported input types.
    """
    if isinstance(set_a , set ) and isinstance(set_b , set ):
        intersection = len(set_a.intersection(set_b ) )
        if alternative_union:
            union = len(set_a ) + len(set_b )
        else:
            union = len(set_a.union(set_b ) )
        return intersection / union
    if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
        # list/tuple path keeps duplicates, mirroring the original behavior
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a ) + len(set_b )
            return len(intersection ) / union
        else:
            # multiset-style union: A plus the elements of B not already in A
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection ) / len(union )
    return None
if __name__ == "__main__":
    # Demo with two overlapping sets (3 shared of 8 distinct -> 0.375).
    # NOTE(review): both sets are bound to the mangled name ``A_`` and
    # ``jaccard_similarity``/``set_a``/``set_b`` are undefined as written.
    A_ : Optional[Any] = {'a', 'b', 'c', 'd', 'e'}
    A_ : List[Any] = {'c', 'd', 'e', 'f', 'h', 'i'}
    print(jaccard_similarity(set_a, set_b))
| 712 |
def UpperCamelCase (first: int , second: int ) -> int:
    """Add two non-negative integers using only bitwise operations.

    FIX: both parameters were mangled to the same name (a SyntaxError) and the
    body read the undefined names ``first``/``second``/``c``; the parameter
    names are restored and the carry gets a proper local.

    NOTE(review): with Python's unbounded ints this loop does not terminate
    for negative operands — callers should pass non-negative values.
    """
    while second != 0:
        carry = first & second  # bit positions that generate a carry
        first ^= second         # sum of the bits, ignoring carries
        second = carry << 1     # propagate the carries one position left
    return first
if __name__ == "__main__":
    # Run doctests, then add two user-supplied integers interactively.
    import doctest

    doctest.testmod()
    A_ : Optional[Any] = int(input('Enter the first number: ').strip())
    A_ : List[str] = int(input('Enter the second number: ').strip())
    # NOTE(review): ``add``/``first``/``second`` are undefined here — the
    # function above was renamed ``UpperCamelCase`` and both inputs were bound
    # to the mangled name ``A_``; confirm the original names.
    print(f'''{add(first, second) = }''')
| 64 | 0 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
A_ : Dict = abspath(join(dirname(dirname(__file__)), 'src'))
# NOTE(review): the computed path is bound to the mangled name ``A_`` but the
# insert below reads ``git_repo_path``, which is undefined here — presumably
# the original variable name; confirm before running.
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def UpperCamelCase (lowercase_: Optional[Any] ) -> str:
    """Pytest ``addoption`` hook body: delegate option registration to the
    shared helper from diffusers' testing utilities."""
    from diffusers.utils.testing_utils import pytest_addoption_shared as add_shared_options

    add_shared_options(lowercase_ )
def UpperCamelCase(lowercase_):
    """Pytest terminal-summary hook shim: emit report files when requested.

    `lowercase_` is the pytest terminalreporter object; the scrambled original
    read the option from an undefined name `terminalreporter`, fixed here.
    NOTE(review): upstream passes the option value as the report id
    (`id=make_reports`); the scrambled version passed the reporter twice —
    restored to the upstream form, confirm against the original conftest.
    """
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = lowercase_.config.getoption("""--make-reports""")
    if make_reports:
        pytest_terminal_summary_main(lowercase_, id=make_reports)
| 713 |
from __future__ import annotations
from collections.abc import Callable
A_ : List[Any] = list[list[float | int]]
def UpperCamelCase(matrix, vector):
    """Solve the linear system ``matrix @ x = vector`` by Gaussian elimination.

    Uses partial pivoting followed by back-substitution; each solution
    component is rounded to 10 decimal places to suppress float noise.

    NOTE(review): the scrambled original declared both parameters with the same
    name (a SyntaxError) and dropped every assignment target; names restored
    from the algorithm the body implements (Project Euler 101 solver).

    :param matrix: square coefficient matrix as a list of rows
    :param vector: column vector as a list of single-element rows
    :return: solution column vector in the same [[x0], [x1], ...] shape
    """
    size = len(matrix)
    # Build the augmented matrix [matrix | vector].
    augmented = [[0 for _ in range(size + 1)] for _ in range(size)]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting: choose the row with the largest |entry| in this column
        pivot_row = max((abs(augmented[rowa][col]), rowa) for rowa in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        # eliminate this column below the pivot
        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio

        row += 1
        col += 1

    # back substitution: clear the entries above each pivot
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def UpperCamelCase (lowercase_: list[int] ) -> Callable[[int], int]:
    """Fit a polynomial through the points (1, y1), (2, y2), ... of `lowercase_`
    via a Vandermonde system and return it as a callable.

    NOTE(review): scrambling replaced every assignment target with `A__`, so
    the names `size`, `coeffs` and `var` read below are undefined as written;
    upstream they are the sequence length, the solved coefficient column and
    the inner function's argument. `solve` is also not defined under that name
    in this module (the solver above was renamed) — confirm against the
    upstream Project Euler 101 solution.
    """
    A__ : int = len(lowercase_ )
    # Vandermonde matrix (rows of powers of x) and target column vector.
    A__ : Matrix = [[0 for _ in range(lowercase_ )] for _ in range(lowercase_ )]
    A__ : Matrix = [[0] for _ in range(lowercase_ )]
    A__ : Matrix
    A__ : int
    A__ : int
    A__ : int
    for x_val, y_val in enumerate(lowercase_ ):
        for col in range(lowercase_ ):
            A__ : Dict = (x_val + 1) ** (size - col - 1)
        A__ : Any = y_val
    A__ : Union[str, Any] = solve(lowercase_ , lowercase_ )

    def interpolated_func(lowercase_: int ) -> int:
        # Evaluate the fitted polynomial with its coefficients rounded to ints.
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(lowercase_ ) )

    return interpolated_func
def UpperCamelCase(variable: int) -> int:
    """Evaluate u(n) = 1 - n + n^2 - n^3 + ... + n^10, the generating function
    from Project Euler problem 101.

    NOTE(review): the scrambled original named the parameter `lowercase_` while
    the body read the undefined name `variable`; the parameter is renamed to
    match the body (callers invoke it positionally).
    """
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def UpperCamelCase (lowercase_: Callable[[int], int] = question_function , lowercase_: int = 10 ) -> int:
    """Project Euler 101: sum the first incorrect term (FIT) of each optimum
    polynomial fitted to a prefix of the sequence u(1)..u(order).

    NOTE(review): as written, both parameters share one name (a SyntaxError)
    and the default `question_function`, plus the names `func`, `order`,
    `data_points`, `interpolate`, `polynomials`, `poly`, `x_val` and `ret`
    read below, do not resolve in this scrambled module; upstream the
    parameters are `func` and `order`. Confirm against the upstream original.
    """
    A__ : list[int] = [func(lowercase_ ) for x_val in range(1 , order + 1 )]
    A__ : list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
    ]
    A__ : int = 0
    A__ : Callable[[int], int]
    A__ : int
    for poly in polynomials:
        # walk forward until the fitted polynomial first disagrees with func
        A__ : List[str] = 1
        while func(lowercase_ ) == poly(lowercase_ ):
            x_val += 1
        ret += poly(lowercase_ )
    return ret


if __name__ == "__main__":
    # NOTE(review): `solution` is not a defined name in this scrambled module.
    print(f'''{solution() = }''')
| 64 | 0 |
class _a : # Public class to implement a graph
'''simple docstring'''
def __init__( self , A__ , A__ , A__ ):
A__ : Union[str, Any] = row
A__ : int = col
A__ : Any = graph
def __A ( self , A__ , A__ , A__ ):
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def __A ( self , A__ , A__ , A__ ):
# Checking all 8 elements surrounding nth element
A__ : Optional[int] = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
A__ : int = [-1, 0, 1, -1, 1, -1, 0, 1]
A__ : List[Any] = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , A__ ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , A__ )
def __A ( self ): # And finally, count all islands.
A__ : Union[str, Any] = [[False for j in range(self.COL )] for i in range(self.ROW )]
A__ : Tuple = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(A__ , A__ , A__ )
count += 1
return count
| 714 |
from functools import lru_cache
@lru_cache
def UpperCamelCase(lowercase_: int) -> int:
    """Return lowercase_! (factorial) for a non-negative integer, memoized.

    Raises:
        ValueError: if `lowercase_` is negative.

    NOTE(review): the scrambled original read the undefined name `num` and
    recursed via the undefined name `factorial`; this iterative form keeps the
    same contract (including the exact error message) without relying on the
    module-level function name.
    """
    if lowercase_ < 0:
        raise ValueError("""Number should not be negative.""")
    product = 1
    for factor in range(2, lowercase_ + 1):
        product *= factor
    return product
if __name__ == "__main__":
    # Run any doctests defined in this module when executed as a script.
    import doctest

    doctest.testmod()
| 64 | 0 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
A_ : List[str] = 3
def UpperCamelCase(lowercase_: int) -> int:
    """Return a (probable) primitive root modulo the prime `lowercase_`.

    Repeatedly samples g in [3, lowercase_) and rejects candidates whose
    square or lowercase_-th power is 1 mod lowercase_.

    NOTE(review): the scrambled original assigned the candidate to `A__` but
    tested and returned the undefined names; the candidate variable `g` is
    restored from the body's own `return g`.
    """
    print("""Generating primitive root of p""")
    while True:
        g = random.randrange(3, lowercase_)
        if pow(g, 2, lowercase_) == 1:
            continue
        if pow(g, lowercase_, lowercase_) == 1:
            continue
        return g
def UpperCamelCase (lowercase_: int ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    """Generate an ElGamal public/private key pair of `lowercase_` bits.

    Returns (public_key, private_key) where public_key = (key_size, g, g^d mod
    p inverse, p) and private_key = (key_size, d) per the upstream algorithm.

    NOTE(review): `primitive_root` is not a defined name in this scrambled
    module (the generator above was renamed to UpperCamelCase), the targets
    `key_size`, `e_a`, `d`, `p` read below were scrambled to `A__`, and
    upstream the primitive root is computed from the prime p, not from the key
    size — confirm against the upstream ElGamal key generator.
    """
    print("""Generating prime p...""" )
    A__ : Union[str, Any] = rabin_miller.generate_large_prime(lowercase_ )  # select large prime number.
    A__ : Optional[int] = primitive_root(lowercase_ )  # one primitive root on modulo p.
    A__ : List[Any] = random.randrange(3 , lowercase_ )  # private_key -> have to be greater than 2 for safety.
    A__ : Tuple = cryptomath.find_mod_inverse(pow(lowercase_ , lowercase_ , lowercase_ ) , lowercase_ )
    A__ : Union[str, Any] = (key_size, e_a, e_a, p)
    A__ : str = (key_size, d)
    return public_key, private_key
def UpperCamelCase(name: str, key_size: int) -> None:
    """Generate an ElGamal key pair and write it to {name}_pubkey.txt and
    {name}_privkey.txt, refusing to overwrite existing files.

    NOTE(review): the scrambled original declared both parameters with one
    name (a SyntaxError) and read the undefined name `name`; parameter names
    restored from the body. `generate_key` was already an unresolved name in
    the original (the generator above is named UpperCamelCase) — left as-is.
    """
    if os.path.exists(f"""{name}_pubkey.txt""") or os.path.exists(f"""{name}_privkey.txt"""):
        print("""\nWARNING:""")
        print(
            f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
            """Use a different name or delete these files and re-run this program.""")
        sys.exit()
    public_key, private_key = generate_key(key_size)
    print(f"""\nWriting public key to file {name}_pubkey.txt...""")
    with open(f"""{name}_pubkey.txt""", """w""") as fo:
        fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""")
    print(f"""Writing private key to file {name}_privkey.txt...""")
    with open(f"""{name}_privkey.txt""", """w""") as fo:
        fo.write(f"""{private_key[0]},{private_key[1]}""")
def UpperCamelCase () -> None:
    """Driver: generate 2048-bit ElGamal key files named elgamal_*key.txt."""
    print("""Making key files...""" )
    # NOTE(review): `make_key_files` is not defined under that name in this
    # scrambled module (the writer above was renamed to UpperCamelCase).
    make_key_files("""elgamal""" , 2048 )
    print("""Key files generation successful""" )


if __name__ == "__main__":
    # NOTE(review): `main` is likewise undefined here; upstream names the
    # driver function `main`.
    main()
| 715 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class _a (datasets.BeamBasedBuilder ):
    '''simple docstring'''

    # NOTE(review): all three methods share the name __A, so later definitions
    # shadow earlier ones; upstream these are _info, _split_generators and
    # _build_pcollection of a dummy Beam builder with one "content" column.

    def __A ( self ):
        # Dataset metadata: a single string column named "content".
        # NOTE(review): `supervised_keys=A__` reads an unbound name here.
        return datasets.DatasetInfo(
            features=datasets.Features({"""content""": datasets.Value("""string""" )} ) , supervised_keys=A__ , )

    def __A ( self , A__ , A__ ):
        # One TRAIN split, fed by the dummy flat examples defined below.
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_dummy_examples()} )]

    def __A ( self , A__ , A__ ):
        # Build the Beam PCollection from the in-memory examples.
        # NOTE(review): `pipeline` is not bound here — upstream it is the
        # first parameter of _build_pcollection.
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(A__ )
class _a (datasets.BeamBasedBuilder ):
    '''simple docstring'''

    # NOTE(review): same shadowed-__A situation as the builder above; this
    # variant exposes a nested feature {"a": Sequence({"b": string})}.

    def __A ( self ):
        # Dataset metadata: nested sequence feature.
        # NOTE(review): `supervised_keys=A__` reads an unbound name here.
        return datasets.DatasetInfo(
            features=datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) , supervised_keys=A__ , )

    def __A ( self , A__ , A__ ):
        # One TRAIN split, fed by the nested dummy examples defined below.
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_nested_examples()} )
        ]

    def __A ( self , A__ , A__ ):
        # Build the Beam PCollection from the in-memory examples.
        # NOTE(review): `pipeline` is not bound here — see the builder above.
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(A__ )
def UpperCamelCase() -> list:
    """Return the three flat dummy examples — (index, {"content": str}) pairs —
    fed to the dummy Beam builder's TRAIN split.

    NOTE(review): the original `-> Dict` annotation was wrong (a list is
    returned) and `Dict` is not imported at module level; corrected.
    """
    return [(i, {"content": content}) for i, content in enumerate(["""foo""", """bar""", """foobar"""])]
def UpperCamelCase() -> list:
    """Return the three nested dummy examples — (index, {"a": {"b": [str]}})
    pairs — fed to the nested Beam builder's TRAIN split.

    NOTE(review): the original `-> Tuple` annotation was wrong (a list is
    returned) and `Tuple` is not imported at module level; corrected.
    """
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["""foo""", """bar""", """foobar"""])]
class _a (__magic_name__ ):
    '''simple docstring'''

    # NOTE(review): scrambling renamed every method to __A (they shadow each
    # other), renamed the local targets (`expected_num_examples`, `builder`,
    # `dset`, `write_parquet`) to A__, and the base class to __magic_name__
    # (upstream: TestCase). The builder classes referenced as DummyBeamDataset
    # / NestedBeamDataset are both named `_a` above. The original is datasets'
    # Beam-builder test suite — confirm details against upstream before use.

    @require_beam
    def __A ( self ):
        # Full download_and_prepare round-trip for the flat dummy builder.
        A__ : Dict = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            A__ : int = DummyBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
            A__ : int = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows , A__ )
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
            self.assertDictEqual(dset["""train"""][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset["""train"""][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
            del dset

    @require_beam
    def __A ( self ):
        # Sharded-parquet variant: patch WriteToParquet to force num_shards=2.
        import apache_beam as beam

        A__ : int = beam.io.parquetio.WriteToParquet
        A__ : List[str] = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            A__ : str = DummyBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
            with patch("""apache_beam.io.parquetio.WriteToParquet""" ) as write_parquet_mock:
                A__ : Optional[Any] = partial(A__ , num_shards=2 )
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
            A__ : Optional[int] = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows , A__ )
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["""train"""]["""content"""] ) , sorted(["""foo""", """bar""", """foobar"""] ) )
            self.assertTrue(
                os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
            del dset

    @require_beam
    def __A ( self ):
        # Preparing without a Beam runner must raise MissingBeamOptions.
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            A__ : int = DummyBeamDataset(cache_dir=A__ )
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )

    @require_beam
    def __A ( self ):
        # Full round-trip for the nested-feature builder.
        A__ : List[Any] = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            A__ : Optional[int] = NestedBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) )
            A__ : Optional[int] = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows , A__ )
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
            self.assertDictEqual(dset["""train"""][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset["""train"""][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
            del dset
| 64 | 0 |
from math import factorial
def UpperCamelCase (lowercase_: int = 100 ) -> int:
return sum(map(lowercase_ , str(factorial(lowercase_ ) ) ) )
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 716 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
A_ : Union[str, Any] = logging.get_logger(__name__)
class _a (__magic_name__ ):
    """Deprecated shim: PoolFormerFeatureExtractor -> PoolFormerImageProcessor."""

    def __init__(self, *args, **kwargs):
        # NOTE(review): the scrambled original declared `*A__, **A__` (duplicate
        # parameter names, a SyntaxError) and passed the positional-args tuple
        # as the warning category; upstream uses FutureWarning here.
        warnings.warn(
            """The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use PoolFormerImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args, **kwargs)
| 64 | 0 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def UpperCamelCase (lowercase_: List[Any] , lowercase_: Union[str, Any] ) -> Union[str, Any]:
    """Build an XCLIPConfig for the given checkpoint name and frame count.

    NOTE(review): as written both parameters share one name (a SyntaxError)
    and scrambling replaced the assignment targets, so `model_name`,
    `start_idx` and `config` read below, and the per-field config assignments
    (text/vision hidden sizes etc.), no longer bind anything; upstream this is
    transformers' X-CLIP conversion helper — confirm field mapping there.
    """
    A__ : Optional[Any] = XCLIPTextConfig()

    # derive patch size from model name
    A__ : Any = model_name.find("""patch""" )
    A__ : List[str] = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] )
    A__ : List[Any] = XCLIPVisionConfig(patch_size=lowercase_ , num_frames=lowercase_ )

    if "large" in model_name:
        # "large" checkpoints use bigger text/vision towers.
        A__ : str = 768
        A__ : str = 3072
        A__ : Any = 12
        A__ : Tuple = 1024
        A__ : str = 4096
        A__ : Any = 16
        A__ : List[Any] = 24
        A__ : Optional[Any] = 768
        A__ : Any = 3072

    if model_name == "xclip-large-patch14-16-frames":
        A__ : List[Any] = 336
    A__ : List[str] = XCLIPConfig.from_text_vision_configs(lowercase_ , lowercase_ )
    if "large" in model_name:
        A__ : Dict = 768
    return config
def UpperCamelCase(name: str) -> str:
    """Map an original X-CLIP checkpoint parameter name to its HF counterpart.

    Applies a chain of in-place substring/prefix rewrites covering the text
    encoder, visual encoder, projection heads, MIT and prompt generator.

    NOTE(review): the scrambled original took a parameter `lowercase_` but read
    and returned `name`, and every rewrite's assignment target was dropped;
    restored so each replace actually updates `name`.
    """
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" )
    if name == "positional_embedding":
        name = name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
    if "ln_1" in name:
        name = name.replace("""ln_1""" , """layer_norm1""" )
    if "ln_2" in name:
        name = name.replace("""ln_2""" , """layer_norm2""" )
    if "c_fc" in name:
        name = name.replace("""c_fc""" , """fc1""" )
    if "c_proj" in name:
        name = name.replace("""c_proj""" , """fc2""" )
    if name.startswith("""transformer.resblocks""" ):
        name = name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" )
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("""attn.out_proj""" , """self_attn.out_proj""" )
    if "ln_final" in name:
        name = name.replace("""ln_final""" , """text_model.final_layer_norm""" )
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" )
    if name == "visual.positional_embedding":
        name = name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" )
    if name.startswith("""visual.transformer.resblocks""" ):
        name = name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" )
    if "visual.conv1" in name:
        name = name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" )
    if "visual.ln_pre" in name:
        name = name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" )
    if "visual.ln_post" in name:
        name = name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" )
    if "visual.proj" in name:
        name = name.replace("""visual.proj""" , """visual_projection.weight""" )
    if "text_projection" in name:
        name = name.replace("""text_projection""" , """text_projection.weight""" )
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" )
    if "prompts_visual_ln" in name:
        name = name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" )
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("""positional""" , """position""" )
    if name.startswith("""mit.resblocks""" ):
        name = name.replace("""mit.resblocks""" , """mit.encoder.layers""" )
    # prompts generator
    if name.startswith("""prompts_generator.norm""" ):
        name = name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" )
    return name
def UpperCamelCase (lowercase_: str , lowercase_: int ) -> Optional[Any]:
    """Convert an original X-CLIP state dict to the HF layout, splitting fused
    attention in_proj tensors into separate q/k/v weights and biases.

    NOTE(review): as written both parameters share one name (a SyntaxError)
    and scrambling dropped the assignment targets, so `orig_state_dict`,
    `key_split`, `layer_num`, `dim`, `new_key_name` and the per-slice
    destination keys read below do not bind; upstream this is transformers'
    X-CLIP conversion script — confirm the q/k/v destination keys there.
    """
    for key in orig_state_dict.copy().keys():
        A__ : Dict = orig_state_dict.pop(lowercase_ )
        if "attn.in_proj" in key:
            # Fused qkv projection: split into thirds along the first axis.
            A__ : Union[str, Any] = key.split(""".""" )
            if key.startswith("""visual""" ):
                A__ : Any = key_split[3]
                A__ : Optional[int] = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        A__ : List[Any] = val[
                            :dim, :
                        ]
                        A__ : Optional[Any] = val[
                            dim : dim * 2, :
                        ]
                        A__ : Tuple = val[
                            -dim:, :
                        ]
                    else:
                        A__ : List[str] = val[
                            :dim
                        ]
                        A__ : Union[str, Any] = val[
                            dim : dim * 2
                        ]
                        A__ : List[Any] = val[
                            -dim:
                        ]
                else:
                    if "weight" in key:
                        A__ : List[Any] = val[
                            :dim, :
                        ]
                        A__ : Union[str, Any] = val[
                            dim : dim * 2, :
                        ]
                        A__ : List[str] = val[
                            -dim:, :
                        ]
                    else:
                        A__ : Optional[Any] = val[:dim]
                        A__ : Dict = val[
                            dim : dim * 2
                        ]
                        A__ : str = val[-dim:]
            elif key.startswith("""mit""" ):
                A__ : Optional[Any] = key_split[2]
                A__ : str = config.vision_config.mit_hidden_size
                if "weight" in key:
                    A__ : Dict = val[:dim, :]
                    A__ : int = val[dim : dim * 2, :]
                    A__ : List[str] = val[-dim:, :]
                else:
                    A__ : List[str] = val[:dim]
                    A__ : List[Any] = val[dim : dim * 2]
                    A__ : Optional[Any] = val[-dim:]
            else:
                A__ : Optional[int] = key_split[2]
                A__ : Dict = config.text_config.hidden_size
                if "weight" in key:
                    A__ : int = val[:dim, :]
                    A__ : Any = val[
                        dim : dim * 2, :
                    ]
                    A__ : List[str] = val[-dim:, :]
                else:
                    A__ : Any = val[:dim]
                    A__ : Union[str, Any] = val[
                        dim : dim * 2
                    ]
                    A__ : str = val[-dim:]
        else:
            # Plain key: rename, transposing the two projection matrices.
            A__ : List[Any] = rename_key(lowercase_ )
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                A__ : List[Any] = val.T
            A__ : List[Any] = val
    return orig_state_dict
def UpperCamelCase(lowercase_):
    """Download the spaghetti-eating test clip with `lowercase_` frames (8, 16
    or 32) from the HF hub and return it as a list of frames.

    NOTE(review): the scrambled original dropped the filename assignment target
    and passed the frame count itself as `filename=`; restored so the selected
    .npy file is actually downloaded. The `Optional[int]` annotation was
    unimported and is dropped.
    """
    if lowercase_ == 8:
        filename = """eating_spaghetti_8_frames.npy"""
    elif lowercase_ == 16:
        filename = """eating_spaghetti.npy"""
    elif lowercase_ == 32:
        filename = """eating_spaghetti_32_frames.npy"""
    file = hf_hub_download(
        repo_id="""hf-internal-testing/spaghetti-video""" , filename=filename , repo_type="""dataset""" , )
    video = np.load(file)
    return list(video)
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: Dict=None , lowercase_: Optional[Any]=False ) -> Union[str, Any]:
    """Download an original X-CLIP checkpoint, convert it to the HF format,
    verify the logits on a test video, and optionally save / push to the hub.

    NOTE(review): as written the three parameters share one name (a
    SyntaxError) and scrambling dropped the assignment targets, so the names
    `model_to_url`, `checkpoint_url`, `num_frames`, `config`, `model`,
    `state_dict`, `missing_keys`, `image_size`, `image_processor`,
    `slow_tokenizer`, `processor`, `video`, `inputs`, `outputs`,
    `logits_per_video`, `probs` and `expected_probs` read below do not bind;
    upstream this is transformers' X-CLIP conversion script.
    """
    # checkpoint name -> original download URL
    A__ : int = {
        # fully supervised kinetics-400 checkpoints
        """xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
        """xclip-base-patch32-16-frames""": (
            """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
        ),
        """xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
        """xclip-base-patch16-16-frames""": (
            """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
        ),
        """xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
        """xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
        # fully supervised kinetics-600 checkpoints
        """xclip-base-patch16-kinetics-600""": (
            """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
        ),
        """xclip-base-patch16-kinetics-600-16-frames""": (
            """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
        ),
        """xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
        # few shot
        """xclip-base-patch16-hmdb-2-shot""": (
            """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
        ),
        """xclip-base-patch16-hmdb-4-shot""": (
            """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
        ),
        """xclip-base-patch16-hmdb-8-shot""": (
            """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
        ),
        """xclip-base-patch16-hmdb-16-shot""": (
            """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
        ),
        """xclip-base-patch16-ucf-2-shot""": (
            """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
        ),
        """xclip-base-patch16-ucf-4-shot""": (
            """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
        ),
        """xclip-base-patch16-ucf-8-shot""": (
            """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
        ),
        """xclip-base-patch16-ucf-16-shot""": (
            """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
        ),
        # zero shot
        """xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
    }
    A__ : List[Any] = model_to_url[model_name]
    # infer the frame count from the checkpoint name
    A__ : List[str] = 8
    if "16-frames" in model_name:
        A__ : Any = 16
    elif "shot" in model_name:
        A__ : Dict = 32

    A__ : Any = get_xclip_config(lowercase_ , lowercase_ )
    A__ : str = XCLIPModel(lowercase_ )
    model.eval()

    # google-drive URLs need gdown; plain URLs go through torch.hub
    if "drive" in checkpoint_url:
        A__ : Any = """pytorch_model.bin"""
        gdown.cached_download(lowercase_ , lowercase_ , quiet=lowercase_ )
        A__ : int = torch.load(lowercase_ , map_location="""cpu""" )["""model"""]
    else:
        A__ : Dict = torch.hub.load_state_dict_from_url(lowercase_ )["""model"""]

    A__ : List[Any] = convert_state_dict(lowercase_ , lowercase_ )
    A__ : Tuple = XCLIPModel(lowercase_ )
    A__ : List[Any] = model.load_state_dict(lowercase_ , strict=lowercase_ )
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    # build the processor and run a forward pass on the test clip
    A__ : List[Any] = 336 if model_name == """xclip-large-patch14-16-frames""" else 224
    A__ : Optional[int] = VideoMAEImageProcessor(size=lowercase_ )
    A__ : Union[str, Any] = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" )
    A__ : Tuple = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" )
    A__ : Dict = XCLIPProcessor(image_processor=lowercase_ , tokenizer=lowercase_ )

    A__ : str = prepare_video(lowercase_ )
    A__ : Tuple = processor(
        text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=lowercase_ , return_tensors="""pt""" , padding=lowercase_ )

    print("""Shape of pixel values:""" , inputs.pixel_values.shape )

    with torch.no_grad():
        A__ : List[str] = model(**lowercase_ )

    # Verify outputs against the per-checkpoint reference probabilities
    A__ : int = outputs.logits_per_video
    A__ : str = logits_per_video.softmax(dim=1 )
    print("""Probs:""" , lowercase_ )
    # kinetics-400
    if model_name == "xclip-base-patch32":
        A__ : Optional[Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] )
    elif model_name == "xclip-base-patch32-16-frames":
        A__ : Union[str, Any] = torch.tensor([[7.09_99E-04, 9.98_83E-01, 4.55_80E-04]] )
    elif model_name == "xclip-base-patch16":
        A__ : Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] )
    elif model_name == "xclip-base-patch16-16-frames":
        A__ : Union[str, Any] = torch.tensor([[7.69_37E-04, 9.97_28E-01, 1.94_73E-03]] )
    elif model_name == "xclip-large-patch14":
        A__ : Any = torch.tensor([[0.0062, 0.9864, 0.0075]] )
    elif model_name == "xclip-large-patch14-16-frames":
        A__ : List[Any] = torch.tensor([[3.38_77E-04, 9.99_37E-01, 2.88_88E-04]] )
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        A__ : Tuple = torch.tensor([[0.0555, 0.8914, 0.0531]] )
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        A__ : Tuple = torch.tensor([[3.85_54E-04, 9.99_29E-01, 3.27_54E-04]] )
    elif model_name == "xclip-large-patch14-kinetics-600":
        A__ : List[Any] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        A__ : List[Any] = torch.tensor([[7.18_90E-06, 9.99_94E-01, 5.65_59E-05]] )
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        A__ : Optional[int] = torch.tensor([[1.03_20E-05, 9.99_93E-01, 6.24_35E-05]] )
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        A__ : Union[str, Any] = torch.tensor([[4.13_77E-06, 9.99_90E-01, 9.83_86E-05]] )
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        A__ : Any = torch.tensor([[4.13_47E-05, 9.99_62E-01, 3.34_11E-04]] )
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        A__ : Any = torch.tensor([[8.58_57E-05, 9.99_28E-01, 6.32_91E-04]] )
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        A__ : Optional[int] = torch.tensor([[8.58_57E-05, 9.99_28E-01, 6.32_91E-04]] )
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        A__ : Union[str, Any] = torch.tensor([[0.0027, 0.9904, 0.0070]] )
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        A__ : List[Any] = torch.tensor([[9.82_19E-04, 9.95_93E-01, 3.08_63E-03]] )
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        A__ : Union[str, Any] = torch.tensor([[3.50_82E-04, 9.97_85E-01, 1.79_66E-03]] )
    else:
        raise ValueError(f"""Model name {model_name} not supported""" )
    assert torch.allclose(lowercase_ , lowercase_ , atol=1E-3 )
    print("""Looks ok!""" )

    if pytorch_dump_folder_path is not None:
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(lowercase_ )

    if push_to_hub:
        print("""Pushing model, processor and slow tokenizer files to the hub...""" )
        model.push_to_hub(lowercase_ , organization="""nielsr""" )
        processor.push_to_hub(lowercase_ , organization="""nielsr""" )
        slow_tokenizer.push_to_hub(lowercase_ , organization="""nielsr""" )
if __name__ == "__main__":
    # CLI entry point for the conversion script.
    # NOTE(review): scrambling renamed the parser/args targets to A_, so the
    # names `parser`, `args` and `convert_xclip_checkpoint` read below do not
    # resolve in this module as written.
    A_ : List[str] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='xclip-base-patch32',
        type=str,
        help='Name of the model.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    A_ : str = parser.parse_args()
    convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 717 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
A_ : Any = logging.getLogger(__name__)
def UpperCamelCase(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Build (train, valid) DataLoaders over a noisy linear relation y = a*x + b.

    NOTE(review): the scrambled original declared all five parameters with the
    same name (a SyntaxError); names restored from the defaults (2, 3, 16, 10,
    2) and the body's reads of `a`, `b`, `batch_size` and `n_batches`, per the
    upstream accelerate checkpointing test.

    :return: (train_dataloader, valid_dataloader) tuple
    """

    def get_dataset(n_batches):
        # x ~ N(0, 1); targets are a*x + b plus small gaussian noise.
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def UpperCamelCase(num_epochs, model, optimizer, dataloader, accelerator, scheduler=None):
    """Run a quick MSE training loop and return the random values drawn per batch.

    NOTE(review): the scrambled original declared all six parameters with the
    same name (a SyntaxError); names restored from the body's reads (`model`,
    `optimizer`, `dataloader`, `accelerator`, `scheduler`) per the upstream
    accelerate checkpointing test.

    :return: list of random.random() draws, one per processed batch
    """
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class _a (nn.Module ):
'''simple docstring'''
def __init__( self ):
super().__init__()
A__ : str = nn.Parameter(torch.randn(1 ) )
A__ : Any = nn.Parameter(torch.randn(1 ) )
def __A ( self , A__ ):
return x * self.a + self.b
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Optional[Any] = DummyModel()
A__ : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : str = dummy_dataloaders()
A__ : Dict = ProjectConfiguration(total_limit=1 , project_dir=A__ , automatic_checkpoint_naming=A__ )
# Train baseline
A__ : List[str] = Accelerator(project_config=A__ )
A__ , A__ , A__ , A__ : Any = accelerator.prepare(
A__ , A__ , A__ , A__ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : str = DummyModel()
A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : int = dummy_dataloaders()
# Train baseline
A__ : str = Accelerator()
A__ , A__ , A__ , A__ : List[str] = accelerator.prepare(
A__ , A__ , A__ , A__ )
# Save initial
A__ : List[Any] = os.path.join(A__ , """initial""" )
accelerator.save_state(A__ )
((A__) , (A__)) : str = model.a.item(), model.b.item()
A__ : Dict = optimizer.state_dict()
A__ : List[str] = train(3 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : str = model.a.item(), model.b.item()
A__ : Any = optimizer.state_dict()
# Train partially
set_seed(42 )
A__ : Optional[int] = DummyModel()
A__ : Dict = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : Dict = dummy_dataloaders()
A__ : List[str] = Accelerator()
A__ , A__ , A__ , A__ : Optional[Any] = accelerator.prepare(
A__ , A__ , A__ , A__ )
accelerator.load_state(A__ )
((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
A__ : Union[str, Any] = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
A__ : List[str] = train(2 , A__ , A__ , A__ , A__ )
# Save everything
A__ : Optional[int] = os.path.join(A__ , """checkpoint""" )
accelerator.save_state(A__ )
# Load everything back in and make sure all states work
accelerator.load_state(A__ )
test_rands += train(1 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Union[str, Any] = model.a.item(), model.b.item()
A__ : Optional[int] = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : int = DummyModel()
A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : List[str] = dummy_dataloaders()
A__ : str = ProjectConfiguration(automatic_checkpoint_naming=A__ )
# Train baseline
A__ : Any = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ : str = accelerator.prepare(
A__ , A__ , A__ , A__ )
# Save initial
accelerator.save_state()
((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
A__ : int = optimizer.state_dict()
A__ : int = train(3 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Optional[Any] = model.a.item(), model.b.item()
A__ : Any = optimizer.state_dict()
# Train partially
set_seed(42 )
A__ : Dict = DummyModel()
A__ : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : Union[str, Any] = dummy_dataloaders()
A__ : List[Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A__ )
A__ : Dict = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
A__ , A__ , A__ , A__ )
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
A__ : Tuple = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
A__ : str = train(2 , A__ , A__ , A__ , A__ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_1""" ) )
test_rands += train(1 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
A__ : List[Any] = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
# `register_for_checkpointing` must reject objects without state_dict /
# load_state_dict; the raised message should name only the offending items
# (indices 0 and 1 — the raw tensors), not the model/optimizer (indices 2, 3).
def __A ( self ):
A__ : Union[str, Any] = torch.tensor([1, 2, 3] )
A__ : int = torch.tensor([2, 3, 4] )
A__ : List[Any] = DummyModel()
A__ : List[Any] = torch.optim.Adam(net.parameters() )
A__ : Tuple = Accelerator()
with self.assertRaises(A__ ) as ve:
# NOTE(review): the registered objects are the two tensors, the model and
# the optimizer — bindings obfuscated to `A__`.
accelerator.register_for_checkpointing(A__ , A__ , A__ , A__ )
A__ : Any = str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
# LR-scheduler state must round-trip through save_state/load_state: training
# changes the scheduler's state_dict, reloading checkpoint_0 restores it.
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Any = DummyModel()
A__ : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ : Dict = torch.optim.lr_scheduler.StepLR(A__ , step_size=1 , gamma=0.9_9 )
A__ , A__ : List[Any] = dummy_dataloaders()
A__ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=A__ )
# Train baseline
A__ : Optional[Any] = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# Save initial
accelerator.save_state()
A__ : Tuple = scheduler.state_dict()
train(3 , A__ , A__ , A__ , A__ , A__ )
self.assertNotEqual(A__ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
self.assertEqual(A__ , scheduler.state_dict() )
# With `total_limit=2`, only the two most recent auto-named checkpoints are
# kept: after 11 saves, checkpoint_0 is gone and checkpoints 9 and 10 remain.
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Optional[Any] = DummyModel()
A__ : int = ProjectConfiguration(automatic_checkpoint_naming=A__ , total_limit=2 )
# Train baseline
A__ : List[str] = Accelerator(project_dir=A__ , project_config=A__ )
A__ : Union[str, Any] = accelerator.prepare(A__ )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_10""" ) ) )
@require_cuda
# Re-runs this very test file under `torchrun` across all visible GPUs; the
# distributed assertions live in the `__main__` block at the bottom of the file.
def __A ( self ):
A__ : Dict = ["""torchrun""", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(A__ , env=os.environ.copy() )
# Distributed entry point (invoked via torchrun by the test above): verifies
# that optimizer state can be checkpointed and reloaded onto either CPU or the
# accelerator device via `load_state(..., map_location=...)`, and that an
# invalid map_location raises TypeError.
if __name__ == "__main__":
A_ : List[str] = '/tmp/accelerate/state_checkpointing'
A_ : Optional[Any] = DummyModel()
A_ : Union[str, Any] = torch.optim.Adam(params=model.parameters(), lr=1E-3)
A_ : str = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
A_ , A_ : List[Any] = dummy_dataloaders()
A_ : int = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
A_ : List[str] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
# Only the main process prepares a clean scratch directory.
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
A_ , A_ , A_ , A_ , A_ : List[Any] = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
A_ , A_ : Dict = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
A_ : str = group['params'][0].device
break
assert param_device.type == accelerator.device.type
A_ : Optional[Any] = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
A_ : str = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
A_ : Tuple = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 64 | 0 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _a :
"""Helper that builds a tiny Pegasus config plus matching inputs for the TF
model tests, and checks decoder `past_key_values` caching consistency.

NOTE(review): `__init__` bindings are obfuscated to locals (`A__ = parent`
etc.); originally these were `self.<attr>` assignments — the attributes are
read back via `self.` throughout the class.
"""
UpperCAmelCase__: List[Any] = PegasusConfig
UpperCAmelCase__: Optional[int] = {}
UpperCAmelCase__: List[str] = '''gelu'''
def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=False , A__=99 , A__=32 , A__=2 , A__=4 , A__=37 , A__=0.1 , A__=0.1 , A__=40 , A__=2 , A__=1 , A__=0 , ):
A__ : Dict = parent
A__ : Dict = batch_size
A__ : Any = seq_length
A__ : Optional[Any] = is_training
A__ : int = use_labels
A__ : Any = vocab_size
A__ : Union[str, Any] = hidden_size
A__ : Tuple = num_hidden_layers
A__ : Tuple = num_attention_heads
A__ : List[Any] = intermediate_size
A__ : Union[str, Any] = hidden_dropout_prob
A__ : Optional[Any] = attention_probs_dropout_prob
A__ : List[Any] = max_position_embeddings
A__ : Any = eos_token_id
A__ : List[Any] = pad_token_id
A__ : List[Any] = bos_token_id
# Build (config, inputs_dict): random input_ids with a forced EOS column,
# random decoder_input_ids, and a tiny PegasusConfig.
def __A ( self ):
A__ : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
A__ : Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
A__ : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
A__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Tuple = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
A__ : str = prepare_pegasus_inputs_dict(A__ , A__ , A__ )
return config, inputs_dict
# Feed the decoder with and without cached past_key_values and assert a
# random slice of the outputs matches (the cache must not change results).
def __A ( self , A__ , A__ ):
A__ : int = TFPegasusModel(config=A__ ).get_decoder()
A__ : List[Any] = inputs_dict["""input_ids"""]
A__ : Any = input_ids[:1, :]
A__ : Optional[Any] = inputs_dict["""attention_mask"""][:1, :]
A__ : Optional[int] = inputs_dict["""head_mask"""]
A__ : Any = 1
# first forward pass
A__ : Tuple = model(A__ , attention_mask=A__ , head_mask=A__ , use_cache=A__ )
A__ : Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
A__ : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
A__ : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
A__ : Tuple = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
A__ : Optional[Any] = model(A__ , attention_mask=A__ )[0]
A__ : Any = model(A__ , attention_mask=A__ , past_key_values=A__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
A__ : int = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
A__ : Any = output_from_no_past[:, -3:, random_slice_idx]
A__ : Tuple = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(A__ , A__ , rtol=1e-3 )
def UpperCamelCase (
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
) -> dict:
    """Build the standard input dict for the TF Pegasus test models.

    Any mask left as ``None`` is derived: attention masks mark non-pad tokens
    (the first decoder position is always attended), head masks default to
    all-ones tensors shaped from the config.

    NOTE: the original obfuscated signature repeated the parameter name
    ``lowercase_`` (a SyntaxError); the names here are restored from how the
    body reads them.
    """
    if attention_mask is None:
        # 1 for every token that is not padding.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                # The decoder-start position is always visible.
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class _a (__magic_name__ , __magic_name__ , unittest.TestCase ):
"""Common-API test suite for the TF Pegasus models: runs the shared config
tests and the decoder past-key-values consistency check from the tester."""
UpperCAmelCase__: List[Any] = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
UpperCAmelCase__: Tuple = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
UpperCAmelCase__: Tuple = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCAmelCase__: int = True
UpperCAmelCase__: Union[str, Any] = False
UpperCAmelCase__: List[str] = False
# NOTE(review): setUp — bindings obfuscated; originally self.model_tester /
# self.config_tester.
def __A ( self ):
A__ : Optional[Any] = TFPegasusModelTester(self )
A__ : Tuple = ConfigTester(self , config_class=A__ )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*A__ )
@require_sentencepiece
@require_tokenizers
@require_tf
class _a (unittest.TestCase ):
"""Slow integration test: beam-search summarization with google/pegasus-xsum
must reproduce the pinned expected summaries for two source articles."""
UpperCAmelCase__: Optional[int] = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
UpperCAmelCase__: Any = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
UpperCAmelCase__: List[str] = '''google/pegasus-xsum'''
@cached_property
def __A ( self ):
# Tokenizer for the pinned checkpoint, built lazily and cached.
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __A ( self ):
# Model for the pinned checkpoint, built lazily and cached.
A__ : int = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __A ( self , **A__ ):
# Generate summaries and compare them against the expected texts.
A__ : str = self.translate_src_text(**A__ )
assert self.expected_text == generated_words
def __A ( self , **A__ ):
# Tokenize, beam-search generate, and decode back to plain strings.
A__ : List[str] = self.tokenizer(self.src_text , **A__ , padding=A__ , return_tensors="""tf""" )
A__ : Optional[int] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=A__ , )
A__ : Dict = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=A__ )
return generated_words
@slow
def __A ( self ):
self._assert_generated_batch_equal_expected()
| 718 |
def UpperCamelCase (a: str , b: str ) -> bool:
    """Return True if ``a`` can be turned into ``b`` by upper-casing some of
    its lowercase letters and deleting all remaining lowercase letters.

    Classic "abbreviation" dynamic programme: ``dp[i][j]`` is True when the
    first ``i`` characters of ``a`` can produce the first ``j`` characters of
    ``b``. Uppercase characters of ``a`` must be consumed exactly.

    >>> UpperCamelCase("daBcd", "ABC")
    True
    >>> UpperCamelCase("dBcd", "ABC")
    False
    """
    # NOTE: the obfuscated original had duplicate parameter names (a
    # SyntaxError) and lost the dp-cell assignments; both restored here.
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True  # empty prefix of `a` yields empty prefix of `b`
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # Upper-casing a[i] consumes one character of b.
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # A lowercase a[i] may simply be deleted.
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
# Run the doctest examples embedded in this module when executed directly.
import doctest
doctest.testmod()
| 64 | 0 |
from datetime import datetime as dt
import os
from github import Github
# Issues carrying any of these labels are never closed or nagged by the stale
# bot. NOTE: the obfuscated source bound this list as `A_` while the function
# below reads `LABELS_TO_EXEMPT`; the name is restored from that usage.
LABELS_TO_EXEMPT = [
    'good first issue',
    'good second issue',
    'good difficult issue',
    'feature request',
    'new model',
    'wip',
]
def UpperCamelCase () -> None:
    """Close or warn about stale issues on huggingface/transformers.

    For every open issue:
      * close it when the latest comment is from the bot and the issue has
        been inactive for more than 7 days (and is at least 30 days old);
      * otherwise post a stale-warning comment after more than 23 days of
        inactivity (same minimum age);
    issues carrying an exempt label are always skipped.

    Requires a ``GITHUB_TOKEN`` environment variable.
    """
    # NOTE: locals restored from usage — the obfuscated source bound them as
    # `A__` while reading `comments` / `last_comment` below.
    g = Github(os.environ["""GITHUB_TOKEN"""])
    repo = g.get_repo("""huggingface/transformers""")
    open_issues = repo.get_issues(state="""open""")
    for issue in open_issues:
        # Newest comment first.
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="""closed""")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                """This issue has been automatically marked as stale because it has not had """
                """recent activity. If you think this still needs to be addressed """
                """please comment on this thread.\n\nPlease note that issues that do not follow the """
                """[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
                """are likely to be ignored.""" )
if __name__ == "__main__":
    # NOTE: the original script called `main()`, but the entry point in this
    # module is (obfuscated as) `UpperCamelCase`; call the name that exists,
    # otherwise running the script raises NameError.
    UpperCamelCase()
| 719 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
# Module-level RNG shared by `floats_list` when no explicit rng is passed.
# NOTE: the obfuscated source bound this as `A_` while the helper below reads
# `global_rng`; the name is restored from that usage.
global_rng = random.Random()

if is_torch_available():
    import torch
def UpperCamelCase (shape , scale=1.0 , rng=None , name=None ):
    """Create a ``shape[0] x shape[1]`` nested list of random floats in
    ``[0, scale)``.

    `rng` defaults to the module-level ``global_rng``; `name` is accepted for
    API compatibility with the other fixture helpers and is unused.

    NOTE: the obfuscated original repeated the parameter name ``lowercase_``
    (a SyntaxError); the names here are restored from how the body uses them.
    """
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class _a (unittest.TestCase ):
"""Fixture helper for the AST feature-extractor tests: holds the extractor's
construction kwargs and builds batches of raw float audio inputs.

NOTE(review): `__init__` bindings are obfuscated to locals (`A__ = parent`
etc.); originally these were `self.<attr>` assignments — the attributes are
read back via `self.` in the methods below.
"""
def __init__( self , A__ , A__=7 , A__=400 , A__=2000 , A__=1 , A__=0.0 , A__=1_6000 , A__=True , A__=True , ):
A__ : Any = parent
A__ : Optional[int] = batch_size
A__ : Union[str, Any] = min_seq_length
A__ : Dict = max_seq_length
A__ : Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
A__ : str = feature_size
A__ : Optional[int] = padding_value
A__ : List[str] = sampling_rate
A__ : List[str] = return_attention_mask
A__ : int = do_normalize
# kwargs used to instantiate the feature extractor under test.
def __A ( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
# Build a batch of raw speech inputs; lengths increase across the batch
# unless `equal_length`, and `numpify` converts each input to np.ndarray.
def __A ( self , A__=False , A__=False ):
def _flatten(A__ ):
return list(itertools.chain(*A__ ) )
if equal_length:
A__ : Dict = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
A__ : Union[str, Any] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
A__ : Optional[int] = [np.asarray(A__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _a (__magic_name__ , unittest.TestCase ):
"""Tests for ASTFeatureExtractor: list vs numpy input equivalence, dtype
handling when padding to torch tensors, and an integration check against
pinned expected values on a librispeech sample."""
UpperCAmelCase__: int = ASTFeatureExtractor
def __A ( self ):
A__ : Optional[Any] = ASTFeatureExtractionTester(self )
def __A ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
A__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
A__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
A__ : Optional[Any] = [np.asarray(A__ ) for speech_input in speech_inputs]
# Test not batched input
A__ : Tuple = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
A__ : Tuple = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
# Test batched
A__ : Tuple = feat_extract(A__ , padding=A__ , return_tensors="""np""" ).input_values
A__ : Tuple = feat_extract(A__ , padding=A__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
A__ : int = [floats_list((1, x) )[0] for x in (800, 800, 800)]
A__ : List[str] = np.asarray(A__ )
A__ : Union[str, Any] = feat_extract(A__ , return_tensors="""np""" ).input_values
A__ : Optional[Any] = feat_extract(A__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
@require_torch
def __A ( self ):
# Padding python lists vs numpy input must yield float32 features for both
# numpy and torch return tensors. NOTE(review): `floataa` is the obfuscated
# spelling of float32 — TODO confirm against upstream.
import torch
A__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A__ : Tuple = np.random.rand(100 ).astype(np.floataa )
A__ : Tuple = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
A__ : List[str] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
A__ : Any = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __A ( self , A__ ):
# Load `num_samples` raw waveforms from the dummy librispeech dataset.
from datasets import load_dataset
A__ : str = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
A__ : str = ds.sort("""id""" ).select(range(A__ ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
@require_torch
def __A ( self ):
# Integration test: features for one sample must match pinned values.
# fmt: off
A__ : Optional[Any] = torch.tensor(
[-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
-1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
-1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
-0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] )
# fmt: on
A__ : Any = self._load_datasamples(1 )
A__ : Tuple = ASTFeatureExtractor()
A__ : Dict = feature_extractor(A__ , return_tensors="""pt""" ).input_values
self.assertEquals(input_values.shape , (1, 1024, 128) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , A__ , atol=1e-4 ) )
| 64 | 0 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
# NOTE: the obfuscated source bound this as `A_`, but the module below reads
# `logger` (e.g. in the default-backend chooser); the name is restored.
logger = logging.get_logger(__name__)
class _a :
'''simple docstring'''
UpperCAmelCase__: str
UpperCAmelCase__: str = None
@staticmethod
def __A ( ):
raise NotImplementedError
def __A ( self , A__ , A__ , A__ , **A__ ):
raise NotImplementedError
def __A ( self , A__ ):
raise NotImplementedError
def __A ( self ):
if not self.is_available():
raise RuntimeError(
F"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" )
@classmethod
def __A ( cls ):
return F"""`pip install {cls.pip_package or cls.name}`"""
class _a (__magic_name__ ):
    """Optuna hyperparameter-search backend (names restored from the base
    class's call sites; the obfuscated original had duplicate parameters)."""

    name = '''optuna'''

    @staticmethod
    def is_available():
        """True when optuna is importable."""
        return is_optuna_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        """Delegate the search to `run_hp_search_optuna`."""
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        """Default optuna hyperparameter space."""
        return default_hp_space_optuna(trial)
class _a (__magic_name__ ):
    """Ray Tune hyperparameter-search backend (names restored from the base
    class's call sites; the obfuscated original had duplicate parameters)."""

    name = '''ray'''
    # Installed via the "tune" extra, hence the quoted pip spec.
    pip_package = '''\'ray[tune]\''''

    @staticmethod
    def is_available():
        """True when ray is importable."""
        return is_ray_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        """Delegate the search to `run_hp_search_ray`."""
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        """Default Ray Tune hyperparameter space."""
        return default_hp_space_ray(trial)
class _a (__magic_name__ ):
    """SigOpt hyperparameter-search backend (names restored from the base
    class's call sites; the obfuscated original had duplicate parameters)."""

    name = '''sigopt'''

    @staticmethod
    def is_available():
        """True when sigopt is importable."""
        return is_sigopt_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        """Delegate the search to `run_hp_search_sigopt`."""
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        """Default SigOpt hyperparameter space."""
        return default_hp_space_sigopt(trial)
class _a (__magic_name__ ):
    """Weights & Biases hyperparameter-search backend (names restored from the
    base class's call sites; the obfuscated original had duplicate parameters)."""

    name = '''wandb'''

    @staticmethod
    def is_available():
        """True when wandb is importable."""
        return is_wandb_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        """Delegate the search to `run_hp_search_wandb`."""
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        """Default wandb hyperparameter space."""
        return default_hp_space_wandb(trial)
# Registry mapping backend name -> backend class. NOTE: the obfuscated source
# bound this as `A_` while the chooser below reads
# `ALL_HYPERPARAMETER_SEARCH_BACKENDS`; the name is restored from that usage.
# NOTE(review): the backend classes above are all obfuscated as `_a`; the
# identifiers listed here come from the original module — confirm on restore.
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend
    for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def UpperCamelCase () -> str:
    """Return the name of the default hyperparameter-search backend.

    Picks the first installed backend (logging a note when several are
    available); raises RuntimeError with per-backend install hints when none
    is installed.

    NOTE: locals restored from usage — the obfuscated source bound them as
    `A__`/`lowercase_` while reading `available_backends`/`name` below.
    """
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"""{len(available_backends)} hyperparameter search backends available. Using {name} as the default.""" )
        return name
    raise RuntimeError(
        """No hyperparameter search backend available.\n"""
        + """\n""".join(
            f""" - To install {backend.name} run {backend.pip_install()}"""
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
| 720 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _a (__magic_name__ , __magic_name__ , __magic_name__ ):
"""GPT-2 language model conditioned on a projected prefix embedding (a
CLIP-caption-style decoder): prefix features are mapped through optional
encode/decode linear layers, prepended to the token embeddings, and captions
are produced with a hand-rolled beam search.

NOTE(review): most locals are obfuscated to `A__` and many keyword values to
`A__`; the data flow below is documented from the surviving reads only.
"""
UpperCAmelCase__: str = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self , A__ , A__ , A__ = None , A__ = 5_0257 , A__ = 1024 , A__ = 768 , A__ = 12 , A__ = 12 , A__ = None , A__ = "gelu_new" , A__ = 0.1 , A__ = 0.1 , A__ = 0.1 , A__ = 1e-5 , A__ = 0.0_2 , A__ = True , A__ = True , A__ = False , A__ = False , ):
super().__init__()
A__ : Union[str, Any] = prefix_length
# A hidden projection is mandatory whenever the prefix feature size does
# not match the transformer embedding size.
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
F"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
F""" `n_embd`: {n_embd} are not equal.""" )
A__ : str = prefix_inner_dim
A__ : Optional[Any] = prefix_hidden_dim
# encode_prefix / decode_prefix: linear adapters (or identity passthrough).
A__ : Tuple = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
A__ : int = (
nn.Linear(self.prefix_hidden_dim , A__ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
A__ : Tuple = GPTaConfig(
vocab_size=A__ , n_positions=A__ , n_embd=A__ , n_layer=A__ , n_head=A__ , n_inner=A__ , activation_function=A__ , resid_pdrop=A__ , embd_pdrop=A__ , attn_pdrop=A__ , layer_norm_epsilon=A__ , initializer_range=A__ , scale_attn_weights=A__ , use_cache=A__ , scale_attn_by_inverse_layer_idx=A__ , reorder_and_upcast_attn=A__ , )
A__ : int = GPTaLMHeadModel(A__ )
# Forward: embed tokens, project the prefix, concatenate [prefix | tokens],
# and run GPT-2 (prepending dummy zero tokens to labels when training).
def __A ( self , A__ , A__ , A__ = None , A__ = None , ):
A__ : List[str] = self.transformer.transformer.wte(A__ )
A__ : int = self.encode_prefix(A__ )
A__ : int = self.decode_prefix(A__ )
A__ : Optional[Any] = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
A__ : Any = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
A__ : List[Any] = torch.cat((dummy_token, input_ids) , dim=1 )
A__ : List[str] = self.transformer(inputs_embeds=A__ , labels=A__ , attention_mask=A__ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
# Zero label placeholders covering the prefix positions.
# NOTE(review): `torch.intaa` is the obfuscated spelling of int64 — confirm.
def __A ( self , A__ , A__ ):
return torch.zeros(A__ , self.prefix_length , dtype=torch.intaa , device=A__ )
def __A ( self , A__ ):
return self.encode_prefix(A__ )
# Generate captions: decode each encoded feature separately and beam-search.
@torch.no_grad()
def __A ( self , A__ , A__ , A__ ):
A__ : List[Any] = torch.split(A__ , 1 , dim=0 )
A__ : Optional[int] = []
A__ : str = []
for feature in features:
A__ : Dict = self.decode_prefix(feature.to(A__ ) ) # back to the clip feature
# Only support beam search for now
A__ , A__ : Union[str, Any] = self.generate_beam(
input_embeds=A__ , device=A__ , eos_token_id=A__ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
A__ : int = torch.stack(A__ )
A__ : List[Any] = torch.stack(A__ )
return generated_tokens, generated_seq_lengths
# Beam search over the GPT-2 head: keeps `beam_size` candidates ranked by
# length-normalized log-probability, stops beams at eos, returns candidates
# sorted best-first together with their lengths.
@torch.no_grad()
def __A ( self , A__=None , A__=None , A__=None , A__ = 5 , A__ = 67 , A__ = 1.0 , A__ = None , ):
A__ : Any = eos_token_id
A__ : Any = None
A__ : Optional[int] = None
A__ : Optional[Any] = torch.ones(A__ , device=A__ , dtype=torch.int )
A__ : Any = torch.zeros(A__ , device=A__ , dtype=torch.bool )
if input_embeds is not None:
A__ : Dict = input_embeds
else:
A__ : str = self.transformer.transformer.wte(A__ )
for i in range(A__ ):
A__ : Dict = self.transformer(inputs_embeds=A__ )
A__ : str = outputs.logits
A__ : Union[str, Any] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
A__ : Any = logits.softmax(-1 ).log()
if scores is None:
# First step: fan the single input out into `beam_size` beams.
A__ , A__ : Optional[int] = logits.topk(A__ , -1 )
A__ : List[Any] = generated.expand(A__ , *generated.shape[1:] )
A__ , A__ : List[Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
A__ : Optional[Any] = next_tokens
else:
A__ : List[Any] = tokens.expand(A__ , *tokens.shape[1:] )
A__ : int = torch.cat((tokens, next_tokens) , dim=1 )
else:
# Subsequent steps: stopped beams are frozen (forced -inf except one
# continuation), then the top `beam_size` length-normalized sums are
# re-selected across all beam x vocab continuations.
A__ : Optional[int] = -float(np.inf )
A__ : List[Any] = 0
A__ : str = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
A__ : Dict = scores_sum / seq_lengths[:, None]
A__ , A__ : List[Any] = scores_sum_average.view(-1 ).topk(A__ , -1 )
A__ : Tuple = next_tokens // scores_sum.shape[1]
A__ : Optional[Any] = seq_lengths[next_tokens_source]
A__ : List[str] = next_tokens % scores_sum.shape[1]
A__ : Optional[int] = next_tokens.unsqueeze(1 )
A__ : int = tokens[next_tokens_source]
A__ : List[Any] = torch.cat((tokens, next_tokens) , dim=1 )
A__ : str = generated[next_tokens_source]
A__ : Optional[Any] = scores_sum_average * seq_lengths
A__ : Union[str, Any] = is_stopped[next_tokens_source]
A__ : str = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
A__ : Optional[int] = torch.cat((generated, next_token_embed) , dim=1 )
A__ : List[str] = is_stopped + next_tokens.eq(A__ ).squeeze()
if is_stopped.all():
break
A__ : Dict = scores / seq_lengths
A__ : Dict = scores.argsort(descending=A__ )
# tokens tensors are already padded to max_seq_length
A__ : Union[str, Any] = [tokens[i] for i in order]
A__ : Any = torch.stack(A__ , dim=0 )
A__ : Dict = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 64 | 0 |
def UpperCamelCase (n: int = 2000000 ) -> int:
    """Return the sum of all primes strictly below ``n`` (Project Euler 10).

    Uses a sieve of Eratosthenes where ``primality_list[i] == 0`` marks a
    prime and ``1`` marks 0, 1 and composites.

    NOTE: the obfuscated original lost the sieve-cell assignments (every
    ``primality_list[...] = 1`` became a binding to ``A__``), so it always
    returned 0; the assignments are restored here.
    """
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1  # 0 is not prime
    primality_list[1] = 1  # 1 is not prime
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            # Mark every multiple of the prime i, starting at i*i.
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
# Print the Project Euler 10 answer (sum of primes below two million).
# NOTE(review): `solution` is not defined in this module as written — the
# sieve above is obfuscated as `UpperCamelCase`; confirm the original name.
print(f'''{solution() = }''')
| 721 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
# NOTE: the obfuscated source bound this as `A_`, but the builder below logs
# via `logger` (deprecation warnings); the name is restored from that usage.
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class _a (datasets.BuilderConfig ):
"""BuilderConfig for the JSON loader: optional explicit features, text
encoding, an optional top-level field to read records from, and chunked
reading parameters (`block_size`/`use_threads`/`newlines_in_values` are
deprecated — see _info() in the builder)."""
UpperCAmelCase__: Optional[datasets.Features] = None
UpperCAmelCase__: str = "utf-8"
UpperCAmelCase__: Optional[str] = None
UpperCAmelCase__: Optional[str] = None
UpperCAmelCase__: bool = True # deprecated
UpperCAmelCase__: Optional[int] = None # deprecated
UpperCAmelCase__: int = 10 << 20 # 10MB
UpperCAmelCase__: Optional[bool] = None
class _a (datasets.ArrowBasedBuilder ):
    """Arrow-based dataset builder that reads JSON and JSON-Lines files.

    Files are parsed in chunks with ``pyarrow.json.read_json``; a Python-level
    ``json.load`` fallback handles files that are a single JSON document.
    """

    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        """Validate deprecated/unsupported config options and return DatasetInfo."""
        if self.config.block_size is not None:
            logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
            # Honor the deprecated option by mapping it onto its replacement.
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                """The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" )
        if self.config.newlines_in_values is not None:
            raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
        return datasets.DatasetInfo(features=self.config.features )

    def _split_generators(self, dl_manager):
        """Download/extract ``config.data_files`` and build one SplitGenerator per split.

        Raises:
            ValueError: if no data files were specified.
        """
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files, (str, list, tuple) ):
            # A bare path or list of paths means a single (train) split.
            files = data_files
            if isinstance(files, str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"""files""": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"""files""": files} ) )
        return splits

    def _cast_table(self, pa_table):
        """Cast ``pa_table`` to the requested features, adding missing columns as nulls."""
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                type = self.config.features.arrow_schema.field(column_name ).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table ), type=type ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema )
        return pa_table

    def _generate_tables(self, files):
        """Yield ``(key, pa.Table)`` pairs for every file in ``files``."""
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors ) as f:
                    dataset = json.load(f )
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple) ):
                    keys = set().union(*[row.keys() for row in dataset] )
                    mapping = {col: [row.get(col ) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping )
                yield file_idx, self._cast_table(pa_table )
            # If the file has one json object per line
            else:
                with open(file, """rb""" ) as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10 )
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
                    )
                    while True:
                        batch = f.read(self.config.chunksize )
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f )
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors ).encode("""utf-8""" )
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch ), read_options=paj.ReadOptions(block_size=block_size ) )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    # Only retry ArrowInvalid errors caused by a JSON record
                                    # straddling the block boundary, and only while growing
                                    # the block size can still help.
                                    if (
                                        isinstance(e, pa.ArrowInvalid )
                                        and "straddling" not in str(e )
                                        or block_size > len(batch )
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"""Batch of {len(batch )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            # Not valid JSON-Lines: fall back to parsing the whole file as one document.
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors ) as f:
                                    dataset = json.load(f )
                            except json.JSONDecodeError:
                                logger.error(f"""Failed to read file '{file}' with error {type(e )}: {e}""" )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list ):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset] )
                                    mapping = {col: [row.get(col ) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping )
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"""Failed to read file '{file}' with error {type(e )}: {e}""" )
                                    raise ValueError(f"""Not able to read records in the JSON file at {file}.""" ) from None
                                yield file_idx, self._cast_table(pa_table )
                                break
                            else:
                                logger.error(f"""Failed to read file '{file}' with error {type(e )}: {e}""" )
                                raise ValueError(
                                    f"""Not able to read records in the JSON file at {file}. """
                                    f"""You should probably indicate the field of the JSON file containing your records. """
                                    f"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
                                    f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table )
                        batch_idx += 1
| 64 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class __lowerCAmelCase ( PretrainedConfig ):
    """Configuration for an openai-gpt style model.

    Defaults reproduce the original GPT architecture (12 layers, 12 heads,
    768-dim embeddings, 512 positions, 40478-token vocabulary).
    """

    # `PretrainedConfig` machinery requires these exact class-attribute names.
    model_type = 'openai-gpt'
    # Map canonical config names onto this architecture's attribute names.
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        vocab_size=40_478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        """Store the model hyperparameters; extra kwargs go to PretrainedConfig."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn  # activation function name
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
| 65 |
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    """Reset the deprecation-warning registry so each test re-emits its warning.

    The parameter must be literally named ``monkeypatch`` — pytest injects
    fixtures by parameter name.
    """
    monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''' , set() )
@pytest.fixture
def mock_hfh(monkeypatch):
    """Patch `datasets.inspect.huggingface_hub` with a stub that lists a few metrics."""

    class MetricMock:
        def __init__(self, metric_id):
            # Mimic the hub's metric-info objects, which expose an `id`.
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr('''datasets.inspect.huggingface_hub''' , HfhMock() )
@pytest.mark.parametrize(
    '''func, args''' , [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] )
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    """Each deprecated metrics API should emit a FutureWarning pointing at evaluate.

    `mock_emitted_deprecation_warnings` and `mock_hfh` are request-by-name
    fixtures; `tmp_path` is pytest's built-in temporary directory.
    """
    if "tmp_path" in args:
        # Substitute the placeholder string with the real per-test path.
        args = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args )
    with pytest.warns(FutureWarning , match='''https://huggingface.co/docs/evaluate''' ):
        func(*args )
| 65 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.